/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */
#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <asm/byteorder.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>

#include "i915_debugfs.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
#include "intel_fifo_underrun.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_lspcon.h"
#include "intel_lvds.h"
#include "intel_panel.h"
#include "intel_psr.h"
#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vdsc.h"
#define DP_DPRX_ESI_LEN 14

/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE			2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0		340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1		400000

/* DP DSC FEC Overhead factor = 1/(0.972261) */
#define DP_DSC_FEC_OVERHEAD_FACTOR		972261

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
static const struct dp_link_dpll g4x_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which provides more link rates.
 * Below only provides the fixed rates, excluding the variable rates.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires programming a fractional division for m2.
	 * m2 is stored in fixed point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
};
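
/*
 * Worked example of the fixed-point encoding above, for illustration:
 * in the 162000 entry, m2 = 0x819999a, so m2_int = 0x819999a >> 22 = 32
 * and m2_fraction = 0x819999a & ((1 << 22) - 1) = 1677722, matching the
 * inline comments on the table entries.
 */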

/* Constants for DP DSC configurations */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};

/* With Single pipe configuration, HW is capable of supporting maximum
 * of 4 slices per line.
 */
static const u8 valid_dsc_slicecount[] = {1, 2, 4};

/**
 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
bool intel_dp_is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	return dig_port->base.type == INTEL_OUTPUT_EDP;
}

static void intel_dp_link_down(struct intel_encoder *encoder,
			       const struct intel_crtc_state *old_crtc_state);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
					   const struct intel_crtc_state *crtc_state);
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
				      enum pipe pipe);
static void intel_dp_unset_edid(struct intel_dp *intel_dp);

/* update sink rates from dpcd */
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
	static const int dp_rates[] = {
		162000, 270000, 540000, 810000
	};
	int i, max_rate;
	int max_lttpr_rate;

	if (drm_dp_has_quirk(&intel_dp->desc, 0,
			     DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) {
		/* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */
		static const int quirk_rates[] = { 162000, 270000, 324000 };

		memcpy(intel_dp->sink_rates, quirk_rates, sizeof(quirk_rates));
		intel_dp->num_sink_rates = ARRAY_SIZE(quirk_rates);

		return;
	}

	max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
	max_lttpr_rate = drm_dp_lttpr_max_link_rate(intel_dp->lttpr_common_caps);
	if (max_lttpr_rate)
		max_rate = min(max_rate, max_lttpr_rate);

	for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
		if (dp_rates[i] > max_rate)
			break;
		intel_dp->sink_rates[i] = dp_rates[i];
	}

	intel_dp->num_sink_rates = i;
}

/* Get length of rates array potentially limited by max_rate. */
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
	int i;

	/* Limit results by potentially reduced max rate */
	for (i = 0; i < len; i++) {
		if (rates[len - i - 1] <= max_rate)
			return len - i;
	}

	return 0;
}

/* Get length of common rates array potentially limited by max_rate. */
static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
					  int max_rate)
{
	return intel_dp_rate_limit_len(intel_dp->common_rates,
				       intel_dp->num_common_rates, max_rate);
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
	return intel_dp->common_rates[intel_dp->num_common_rates - 1];
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	int source_max = dig_port->max_lanes;
	int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
	int fia_max = intel_tc_port_fia_max_lane_count(dig_port);
	int lttpr_max = drm_dp_lttpr_max_lane_count(intel_dp->lttpr_common_caps);

	if (lttpr_max)
		sink_max = min(sink_max, lttpr_max);

	return min3(source_max, sink_max, fia_max);
}

int intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	return intel_dp->max_link_lane_count;
}

int
intel_dp_link_required(int pixel_clock, int bpp)
{
	/* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */
	return DIV_ROUND_UP(pixel_clock * bpp, 8);
}

int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	/* max_link_clock is the link symbol clock (LS_Clk) in kHz and not the
	 * link rate that is generally expressed in Gbps. Since, 8 bits of data
	 * is transmitted every LS_Clk per lane, there is no need to account for
	 * the channel encoding that is done in the PHY layer here.
	 */

	return max_link_clock * max_lanes;
}
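
/*
 * For illustration: a 2560x1440@60 mode with a 241500 kHz dot clock at
 * 24 bpp needs intel_dp_link_required(241500, 24) = 724500 kB/s, while
 * an HBR2 link (540000 kHz symbol clock) over 4 lanes provides
 * intel_dp_max_data_rate(540000, 4) = 2160000 kB/s, so the mode fits.
 */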

bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	return INTEL_GEN(dev_priv) >= 12 ||
	       (INTEL_GEN(dev_priv) == 11 &&
		encoder->port != PORT_A);
}

static int cnl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;

	u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;

	/* Low voltage SKUs are limited to max of 5.4G */
	if (voltage == VOLTAGE_INFO_0_85V)
		return 540000;

	/* For this SKU 8.1G is supported in all ports */
	if (IS_CNL_WITH_PORT_F(dev_priv))
		return 810000;

	/* For other SKUs, max rate on ports A and D is 5.4G */
	if (port == PORT_A || port == PORT_D)
		return 540000;

	return 810000;
}

static int icl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

	if (intel_phy_is_combo(dev_priv, phy) &&
	    !intel_dp_is_edp(intel_dp))
		return 540000;

	return 810000;
}

static int ehl_max_source_rate(struct intel_dp *intel_dp)
{
	if (intel_dp_is_edp(intel_dp))
		return 540000;

	return 810000;
}

static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
	/* The values must be in increasing order */
	static const int cnl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
	};
	static const int bxt_rates[] = {
		162000, 216000, 243000, 270000, 324000, 432000, 540000
	};
	static const int skl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000
	};
	static const int hsw_rates[] = {
		162000, 270000, 540000
	};
	static const int g4x_rates[] = {
		162000, 270000
	};
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const int *source_rates;
	int size, max_rate = 0, vbt_max_rate;

	/* This should only be done once */
	drm_WARN_ON(&dev_priv->drm,
		    intel_dp->source_rates || intel_dp->num_source_rates);

	if (INTEL_GEN(dev_priv) >= 10) {
		source_rates = cnl_rates;
		size = ARRAY_SIZE(cnl_rates);
		if (IS_GEN(dev_priv, 10))
			max_rate = cnl_max_source_rate(intel_dp);
		else if (IS_JSL_EHL(dev_priv))
			max_rate = ehl_max_source_rate(intel_dp);
		else
			max_rate = icl_max_source_rate(intel_dp);
	} else if (IS_GEN9_LP(dev_priv)) {
		source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_GEN9_BC(dev_priv)) {
		source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
		   IS_BROADWELL(dev_priv)) {
		source_rates = hsw_rates;
		size = ARRAY_SIZE(hsw_rates);
	} else {
		source_rates = g4x_rates;
		size = ARRAY_SIZE(g4x_rates);
	}

	vbt_max_rate = intel_bios_dp_max_link_rate(encoder);
	if (max_rate && vbt_max_rate)
		max_rate = min(max_rate, vbt_max_rate);
	else if (vbt_max_rate)
		max_rate = vbt_max_rate;

	if (max_rate)
		size = intel_dp_rate_limit_len(source_rates, size, max_rate);

	intel_dp->source_rates = source_rates;
	intel_dp->num_source_rates = size;
}

static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *common_rates)
{
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				return k;
			common_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}
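
/*
 * For illustration: with source rates {162000, 270000, 540000} and sink
 * rates {162000, 270000, 324000}, the merge walk above emits
 * {162000, 270000} and returns k = 2. Both input arrays must be sorted
 * in increasing order for this to work.
 */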

/* return index of rate in rates array, or -1 if not found */
static int intel_dp_rate_index(const int *rates, int len, int rate)
{
	int i;

	for (i = 0; i < len; i++)
		if (rate == rates[i])
			return i;

	return -1;
}

static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	drm_WARN_ON(&i915->drm,
		    !intel_dp->num_source_rates || !intel_dp->num_sink_rates);

	intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
						     intel_dp->num_source_rates,
						     intel_dp->sink_rates,
						     intel_dp->num_sink_rates,
						     intel_dp->common_rates);

	/* Paranoia, there should always be something in common. */
	if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates == 0)) {
		intel_dp->common_rates[0] = 162000;
		intel_dp->num_common_rates = 1;
	}
}

static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
				       u8 lane_count)
{
	/*
	 * FIXME: we need to synchronize the current link parameters with
	 * hardware readout. Currently fast link training doesn't work on
	 * boot-up.
	 */
	if (link_rate == 0 ||
	    link_rate > intel_dp->max_link_rate)
		return false;

	if (lane_count == 0 ||
	    lane_count > intel_dp_max_lane_count(intel_dp))
		return false;

	return true;
}

static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
						     int link_rate,
						     u8 lane_count)
{
	const struct drm_display_mode *fixed_mode =
		intel_dp->attached_connector->panel.fixed_mode;
	int mode_rate, max_rate;

	mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
	max_rate = intel_dp_max_data_rate(link_rate, lane_count);
	if (mode_rate > max_rate)
		return false;

	return true;
}

int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
					    int link_rate, u8 lane_count)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int index;

	/*
	 * TODO: Enable fallback on MST links once MST link compute can handle
	 * the fallback params.
	 */
	if (intel_dp->is_mst) {
		drm_err(&i915->drm, "Link Training Unsuccessful\n");
		return -1;
	}

	index = intel_dp_rate_index(intel_dp->common_rates,
				    intel_dp->num_common_rates,
				    link_rate);
	if (index > 0) {
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp->common_rates[index - 1],
							      lane_count)) {
			drm_dbg_kms(&i915->drm,
				    "Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
		intel_dp->max_link_lane_count = lane_count;
	} else if (lane_count > 1) {
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp_max_common_rate(intel_dp),
							      lane_count >> 1)) {
			drm_dbg_kms(&i915->drm,
				    "Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
		intel_dp->max_link_lane_count = lane_count >> 1;
	} else {
		drm_err(&i915->drm, "Link Training Unsuccessful\n");
		return -1;
	}

	return 0;
}

u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
{
	return div_u64(mul_u32_u32(mode_clock, 1000000U),
		       DP_DSC_FEC_OVERHEAD_FACTOR);
}
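
/*
 * For illustration: a 148500 kHz mode clock becomes
 * 148500 * 1000000 / 972261 ~= 152737 kHz once the ~2.85% FEC overhead
 * (1/0.972261) is accounted for.
 */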

static u32
small_joiner_ram_size_bits(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 11)
		return 7680 * 8;
	else
		return 6144 * 8;
}

static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
				       u32 link_clock, u32 lane_count,
				       u32 mode_clock, u32 mode_hdisplay,
				       bool bigjoiner)
{
	u32 bits_per_pixel, max_bpp_small_joiner_ram;
	int i;

	/*
	 * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
	 * (LinkSymbolClock)* 8 * (TimeSlotsPerMTP)
	 * for SST -> TimeSlotsPerMTP is 1,
	 * for MST -> TimeSlotsPerMTP has to be calculated
	 */
	bits_per_pixel = (link_clock * lane_count * 8) /
			 intel_dp_mode_to_fec_clock(mode_clock);
	drm_dbg_kms(&i915->drm, "Max link bpp: %u\n", bits_per_pixel);

	/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
	max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) /
		mode_hdisplay;

	if (bigjoiner)
		max_bpp_small_joiner_ram *= 2;

	drm_dbg_kms(&i915->drm, "Max small joiner bpp: %u\n",
		    max_bpp_small_joiner_ram);

	/*
	 * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
	 * check, output bpp from small joiner RAM check)
	 */
	bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);

	if (bigjoiner) {
		u32 max_bpp_bigjoiner =
			i915->max_cdclk_freq * 48 /
			intel_dp_mode_to_fec_clock(mode_clock);

		DRM_DEBUG_KMS("Max big joiner bpp: %u\n", max_bpp_bigjoiner);
		bits_per_pixel = min(bits_per_pixel, max_bpp_bigjoiner);
	}

	/* Error out if the max bpp is less than smallest allowed valid bpp */
	if (bits_per_pixel < valid_dsc_bpp[0]) {
		drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n",
			    bits_per_pixel, valid_dsc_bpp[0]);
		return 0;
	}

	/* Find the nearest match in the array of known BPPs from VESA */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
		if (bits_per_pixel < valid_dsc_bpp[i + 1])
			break;
	}
	bits_per_pixel = valid_dsc_bpp[i];

	/*
	 * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
	 * fractional part is 0
	 */
	return bits_per_pixel << 4;
}
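
/*
 * For illustration (ignoring the joiner RAM clamps): an HBR3 link
 * (810000 kHz) over 4 lanes driving a 533250 kHz mode gives
 * 810000 * 4 * 8 / intel_dp_mode_to_fec_clock(533250) ~= 47 bpp from the
 * link bandwidth check; clamping to the VESA table above snaps this down
 * to 15, so the function returns 15 << 4 = 240 in U6.4 format.
 */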

static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
				       int mode_clock, int mode_hdisplay,
				       bool bigjoiner)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 min_slice_count, i;
	int max_slice_width;

	if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_0);
	else
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_1);

	max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
	if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
		drm_dbg_kms(&i915->drm,
			    "Unsupported slice width %d by DP DSC Sink device\n",
			    max_slice_width);
		return 0;
	}
	/* Also take into account max slice width */
	min_slice_count = max_t(u8, min_slice_count,
				DIV_ROUND_UP(mode_hdisplay,
					     max_slice_width));

	/* Find the closest match to the valid slice count values */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
		u8 test_slice_count = valid_dsc_slicecount[i] << bigjoiner;

		if (test_slice_count >
		    drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, false))
			break;

		/* big joiner needs small joiner to be enabled */
		if (bigjoiner && test_slice_count < 4)
			continue;

		if (min_slice_count <= test_slice_count)
			return test_slice_count;
	}

	drm_dbg_kms(&i915->drm, "Unsupported Slice Count %d\n",
		    min_slice_count);
	return 0;
}

static enum intel_output_format
intel_dp_output_format(struct drm_connector *connector,
		       const struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	const struct drm_display_info *info = &connector->display_info;

	if (!connector->ycbcr_420_allowed ||
	    !drm_mode_is_420_only(info, mode))
		return INTEL_OUTPUT_FORMAT_RGB;

	if (intel_dp->dfp.ycbcr_444_to_420)
		return INTEL_OUTPUT_FORMAT_YCBCR444;
	else
		return INTEL_OUTPUT_FORMAT_YCBCR420;
}

int intel_dp_min_bpp(enum intel_output_format output_format)
{
	if (output_format == INTEL_OUTPUT_FORMAT_RGB)
		return 6 * 3;
	else
		return 8 * 3;
}

static int intel_dp_output_bpp(enum intel_output_format output_format, int bpp)
{
	/*
	 * The bpp value was assumed to be for RGB format. For YCbCr 4:2:0
	 * output the number of bits per pixel is half that of RGB.
	 */
	if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		bpp /= 2;

	return bpp;
}

static int
intel_dp_mode_min_output_bpp(struct drm_connector *connector,
			     const struct drm_display_mode *mode)
{
	enum intel_output_format output_format =
		intel_dp_output_format(connector, mode);

	return intel_dp_output_bpp(output_format, intel_dp_min_bpp(output_format));
}

static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv,
				  int hdisplay)
{
	/*
	 * Older platforms don't like hdisplay==4096 with DP.
	 *
	 * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline
	 * and frame counter increment), but we don't get vblank interrupts,
	 * and the pipe underruns immediately. The link also doesn't seem
	 * to get trained properly.
	 *
	 * On CHV the vblank interrupts don't seem to disappear but
	 * otherwise the symptoms are similar.
	 *
	 * TODO: confirm the behaviour on HSW+
	 */
	return hdisplay == 4096 && !HAS_DDI(dev_priv);
}

static enum drm_mode_status
intel_dp_mode_valid_downstream(struct intel_connector *connector,
			       const struct drm_display_mode *mode,
			       int target_clock)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	const struct drm_display_info *info = &connector->base.display_info;
	int tmds_clock;

	if (intel_dp->dfp.max_dotclock &&
	    target_clock > intel_dp->dfp.max_dotclock)
		return MODE_CLOCK_HIGH;

	/* Assume 8bpc for the DP++/HDMI/DVI TMDS clock check */
	tmds_clock = target_clock;
	if (drm_mode_is_420_only(info, mode))
		tmds_clock /= 2;

	if (intel_dp->dfp.min_tmds_clock &&
	    tmds_clock < intel_dp->dfp.min_tmds_clock)
		return MODE_CLOCK_LOW;
	if (intel_dp->dfp.max_tmds_clock &&
	    tmds_clock > intel_dp->dfp.max_tmds_clock)
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk = dev_priv->max_dotclk_freq;
	u16 dsc_max_output_bpp = 0;
	u8 dsc_slice_count = 0;
	enum drm_mode_status status;
	bool dsc = false, bigjoiner = false;

	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if ((target_clock > max_dotclk || mode->hdisplay > 5120) &&
	    intel_dp_can_bigjoiner(intel_dp)) {
		bigjoiner = true;
		max_dotclk *= 2;
	}
	if (target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock,
					   intel_dp_mode_min_output_bpp(connector, mode));

	if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay))
		return MODE_H_ILLEGAL;

	/*
	 * Output bpp is stored in 6.4 format so right shift by 4 to get the
	 * integer value since we support only integer values of bpp.
	 */
	if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
	    drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
		if (intel_dp_is_edp(intel_dp)) {
			dsc_max_output_bpp =
				drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
			dsc_slice_count =
				drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
								true);
		} else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
			dsc_max_output_bpp =
				intel_dp_dsc_get_output_bpp(dev_priv,
							    max_link_clock,
							    max_lanes,
							    target_clock,
							    mode->hdisplay,
							    bigjoiner) >> 4;
			dsc_slice_count =
				intel_dp_dsc_get_slice_count(intel_dp,
							     target_clock,
							     mode->hdisplay,
							     bigjoiner);
		}

		dsc = dsc_max_output_bpp && dsc_slice_count;
	}

	/* big joiner configuration needs DSC */
	if (bigjoiner && !dsc)
		return MODE_CLOCK_HIGH;

	if (mode_rate > max_rate && !dsc)
		return MODE_CLOCK_HIGH;

	status = intel_dp_mode_valid_downstream(intel_connector,
						mode, target_clock);
	if (status != MODE_OK)
		return status;

	return intel_mode_valid_max_plane_size(dev_priv, mode, bigjoiner);
}

u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
{
	int i;
	u32 v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((u32)src[i]) << ((3 - i) * 8);
	return v;
}

static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3 - i) * 8);
}
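
/*
 * For illustration: intel_dp_pack_aux() is big-endian within the 32-bit
 * AUX data register, so the bytes {0x11, 0x22, 0x33} pack to 0x11223300,
 * and intel_dp_unpack_aux(0x11223300, dst, 3) recovers the same three
 * bytes.
 */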

static void
intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
					      bool force_disable_vdd);
static void
intel_dp_pps_init(struct intel_dp *intel_dp);

static intel_wakeref_t
pps_lock(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	/*
	 * See intel_power_sequencer_reset() why we need
	 * a power domain reference here.
	 */
	wakeref = intel_display_power_get(dev_priv,
					  intel_aux_power_domain(dp_to_dig_port(intel_dp)));

	mutex_lock(&dev_priv->pps_mutex);

	return wakeref;
}

static intel_wakeref_t
pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	mutex_unlock(&dev_priv->pps_mutex);
	intel_display_power_put(dev_priv,
				intel_aux_power_domain(dp_to_dig_port(intel_dp)),
				wakeref);
	return 0;
}

#define with_pps_lock(dp, wf)						\
	for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf)))
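
/*
 * Typical use of the helper above (sketch): the wakeref both carries the
 * power reference and terminates the loop, so the body runs exactly once
 * with pps_mutex held:
 *
 *	intel_wakeref_t wakeref;
 *
 *	with_pps_lock(intel_dp, wakeref)
 *		edp_panel_vdd_on(intel_dp);
 *
 * pps_unlock() returns 0, which ends the for-loop after one pass.
 */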

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	u32 DP;

	if (drm_WARN(&dev_priv->drm,
		     intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
		     "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
		     pipe_name(pipe), dig_port->base.base.base.id,
		     dig_port->base.base.name))
		return;

	drm_dbg_kms(&dev_priv->drm,
		    "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(pipe), dig_port->base.base.base.id,
		    dig_port->base.base.name);

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev_priv))
		DP |= DP_PIPE_SEL_CHV(pipe);
	else
		DP |= DP_PIPE_SEL(pipe);

	pll_enabled = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled) {
		release_cl_override = IS_CHERRYVIEW(dev_priv) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
			drm_err(&dev_priv->drm,
				"Failed to force on pll for pipe %c!\n",
				pipe_name(pipe));
			return;
		}
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	 */
	intel_de_write(dev_priv, intel_dp->output_reg, DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	if (!pll_enabled) {
		vlv_force_pll_off(dev_priv, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}

static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);

	/*
	 * We don't have power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		if (encoder->type == INTEL_OUTPUT_EDP) {
			drm_WARN_ON(&dev_priv->drm,
				    intel_dp->active_pipe != INVALID_PIPE &&
				    intel_dp->active_pipe !=
				    intel_dp->pps_pipe);

			if (intel_dp->pps_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->pps_pipe);
		} else {
			drm_WARN_ON(&dev_priv->drm,
				    intel_dp->pps_pipe != INVALID_PIPE);

			if (intel_dp->active_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->active_pipe);
		}
	}

	if (pipes == 0)
		return INVALID_PIPE;

	return ffs(pipes) - 1;
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));

	drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE &&
		    intel_dp->active_pipe != intel_dp->pps_pipe);

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	pipe = vlv_find_free_pps(dev_priv);

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE))
		pipe = PIPE_A;

	vlv_steal_power_sequencer(dev_priv, pipe);
	intel_dp->pps_pipe = pipe;

	drm_dbg_kms(&dev_priv->drm,
		    "picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(intel_dp->pps_pipe),
		    dig_port->base.base.base.id,
		    dig_port->base.base.name);

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}

static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int backlight_controller = dev_priv->vbt.backlight.controller;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));

	if (!intel_dp->pps_reset)
		return backlight_controller;

	intel_dp->pps_reset = false;

	/*
	 * Only the HW needs to be reprogrammed, the SW state is fixed and
	 * has been setup during connector init.
	 */
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);

	return backlight_controller;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return intel_de_read(dev_priv, PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return intel_de_read(dev_priv, PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum port port = dig_port->base.port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		drm_dbg_kms(&dev_priv->drm,
			    "no initial power sequencer for [ENCODER:%d:%s]\n",
			    dig_port->base.base.base.id,
			    dig_port->base.base.name);
		return;
	}

	drm_dbg_kms(&dev_priv->drm,
		    "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name,
		    pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
}

void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	if (drm_WARN_ON(&dev_priv->drm,
			!(IS_VALLEYVIEW(dev_priv) ||
			  IS_CHERRYVIEW(dev_priv) ||
			  IS_GEN9_LP(dev_priv))))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		drm_WARN_ON(&dev_priv->drm,
			    intel_dp->active_pipe != INVALID_PIPE);

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		if (IS_GEN9_LP(dev_priv))
			intel_dp->pps_reset = true;
		else
			intel_dp->pps_pipe = INVALID_PIPE;
	}
}

struct pps_registers {
	i915_reg_t pp_ctrl;
	i915_reg_t pp_stat;
	i915_reg_t pp_on;
	i915_reg_t pp_off;
	i915_reg_t pp_div;
};

static void intel_pps_get_registers(struct intel_dp *intel_dp,
				    struct pps_registers *regs)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int pps_idx = 0;

	memset(regs, 0, sizeof(*regs));

	if (IS_GEN9_LP(dev_priv))
		pps_idx = bxt_power_sequencer_idx(intel_dp);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		pps_idx = vlv_power_sequencer_pipe(intel_dp);

	regs->pp_ctrl = PP_CONTROL(pps_idx);
	regs->pp_stat = PP_STATUS(pps_idx);
	regs->pp_on = PP_ON_DELAYS(pps_idx);
	regs->pp_off = PP_OFF_DELAYS(pps_idx);

	/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
	if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
		regs->pp_div = INVALID_MMIO_REG;
	else
		regs->pp_div = PP_DIVISOR(pps_idx);
}

static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_ctrl;
}

static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_stat;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		drm_WARN(&dev_priv->drm, 1,
			 "eDP powered off while attempting aux channel communication.\n");
		drm_dbg_kms(&dev_priv->drm, "Status 0x%08x Control 0x%08x\n",
			    intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
			    intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
	}
}

static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	const unsigned int timeout_ms = 10;
	u32 status;
	bool done;

#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	done = wait_event_timeout(i915->gmbus_wait_queue, C,
				  msecs_to_jiffies_timeout(timeout_ms));

	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (!done)
		drm_err(&i915->drm,
			"%s: did not complete or timeout within %ums (status 0x%08x)\n",
			intel_dp->aux.name, timeout_ms, status);
#undef C

	return status;
}

static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (index)
		return 0;

	/*
	 * The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz. So, take the hrawclk value and divide by 2000 and use that
	 */
	return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000);
}

static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 freq;

	if (index)
		return 0;

	/*
	 * The clock divider is based off the cdclk or PCH rawclk, and would
	 * like to run at 2MHz. So, take the cdclk or PCH rawclk value and
	 * divide by 2000 and use that
	 */
	if (dig_port->aux_ch == AUX_CH_A)
		freq = dev_priv->cdclk.hw.cdclk;
	else
		freq = RUNTIME_INFO(dev_priv)->rawclk_freq;
	return DIV_ROUND_CLOSEST(freq, 2000);
}

static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	}

	return ilk_get_aux_clock_divider(intel_dp, index);
}

static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug-in into the existing code.
	 */
	return index ? 0 : 1;
}
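
/*
 * For illustration: with a 200000 kHz (200 MHz) rawclk,
 * DIV_ROUND_CLOSEST(200000, 2000) = 100, i.e. the divider that yields
 * the ~2 MHz AUX bit clock the comments above ask for.
 */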

static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 aux_clock_divider)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
			to_i915(dig_port->base.base.dev);
	u32 precharge, timeout;

	if (IS_GEN(dev_priv, 6))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev_priv))
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       DP_AUX_CH_CTL_INTERRUPT |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 unused)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *i915 =
			to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
	u32 ret;

	ret = DP_AUX_CH_CTL_SEND_BUSY |
	      DP_AUX_CH_CTL_DONE |
	      DP_AUX_CH_CTL_INTERRUPT |
	      DP_AUX_CH_CTL_TIME_OUT_ERROR |
	      DP_AUX_CH_CTL_TIME_OUT_MAX |
	      DP_AUX_CH_CTL_RECEIVE_ERROR |
	      (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	      DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
	      DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);

	if (intel_phy_is_tc(i915, phy) &&
	    dig_port->tc_mode == TC_PORT_TBT_ALT)
		ret |= DP_AUX_CH_CTL_TBT_IO;

	return ret;
}

static int
intel_dp_aux_xfer(struct intel_dp *intel_dp,
		  const u8 *send, int send_bytes,
		  u8 *recv, int recv_size,
		  u32 aux_send_ctl_flags)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *i915 =
			to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
	bool is_tc_port = intel_phy_is_tc(i915, phy);
	i915_reg_t ch_ctl, ch_data[5];
	u32 aux_clock_divider;
	enum intel_display_power_domain aux_domain;
	intel_wakeref_t aux_wakeref;
	intel_wakeref_t pps_wakeref;
	int i, ret, recv_bytes;
	int try, clock = 0;
	u32 status;
	bool vdd;

	ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	for (i = 0; i < ARRAY_SIZE(ch_data); i++)
		ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);

	if (is_tc_port)
		intel_tc_port_lock(dig_port);

	aux_domain = intel_aux_power_domain(dig_port);

	aux_wakeref = intel_display_power_get(i915, aux_domain);
	pps_wakeref = pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	cpu_latency_qos_update_request(&intel_dp->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = intel_uncore_read_notrace(uncore, ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}
	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (try == 3) {
		const u32 status = intel_uncore_read(uncore, ch_ctl);

		if (status != intel_dp->aux_busy_last_status) {
			drm_WARN(&i915->drm, 1,
				 "%s: not started (status 0x%08x)\n",
				 intel_dp->aux.name, status);
			intel_dp->aux_busy_last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  send_bytes,
							  aux_clock_divider);

		send_ctl |= aux_send_ctl_flags;

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				intel_uncore_write(uncore,
						   ch_data[i >> 2],
						   intel_dp_pack_aux(send + i,
								     send_bytes - i));

			/* Send the command and wait for it to complete */
			intel_uncore_write(uncore, ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp);

			/* Clear done status and any errors */
			intel_uncore_write(uncore,
					   ch_ctl,
					   status |
					   DP_AUX_CH_CTL_DONE |
					   DP_AUX_CH_CTL_TIME_OUT_ERROR |
					   DP_AUX_CH_CTL_RECEIVE_ERROR);

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 *   400us delay required for errors and timeouts
			 *   Timeout errors from the HW already meet this
			 *   requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		drm_err(&i915->drm, "%s: not done (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n",
			    intel_dp->aux.name, status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea of what happened so we return -EBUSY so
	 * drm layer takes care for the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		drm_dbg_kms(&i915->drm,
			    "%s: Forbidden recv_bytes = %d on aux transaction\n",
			    intel_dp->aux.name, recv_bytes);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	cpu_latency_qos_update_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp, pps_wakeref);
	intel_display_power_put_async(i915, aux_domain, aux_wakeref);

	if (is_tc_port)
		intel_tc_port_unlock(dig_port);

	return ret;
}

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)

static void
intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
		    const struct drm_dp_aux_msg *msg)
{
	txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;
}
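
/*
 * For illustration: a native AUX read of one byte at DPCD address 0x600
 * (DP_SET_POWER) yields txbuf = { 0x90, 0x06, 0x00, 0x00 }: request 0x9
 * (DP_AUX_NATIVE_READ) in the high nibble, the 20-bit address split
 * across the low nibble of byte 0 and bytes 1-2, and size - 1 = 0.
 */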

static u32 intel_dp_aux_xfer_flags(const struct drm_dp_aux_msg *msg)
{
	/*
	 * If we're trying to send the HDCP Aksv, we need to set the Aksv
	 * select bit to inform the hardware to send the Aksv after our header
	 * since we can't access that data from software.
	 */
	if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_WRITE &&
	    msg->address == DP_AUX_HDCP_AKSV)
		return DP_AUX_CH_CTL_AUX_AKSV_SELECT;

	return 0;
}

static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	u32 flags = intel_dp_aux_xfer_flags(msg);
	int ret;

	intel_dp_aux_header(txbuf, msg);

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (drm_WARN_ON(&i915->drm, txsize > 20))
			return -E2BIG;

		drm_WARN_ON(&i915->drm, !msg->buffer != !msg->size);

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, flags);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (drm_WARN_ON(&i915->drm, rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, flags);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_B);
	}
}

static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_B, index);
	}
}

static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_CTL(aux_ch);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_DATA(aux_ch, index);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static i915_reg_t tgl_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_USBC1:
	case AUX_CH_USBC2:
	case AUX_CH_USBC3:
	case AUX_CH_USBC4:
	case AUX_CH_USBC5:
	case AUX_CH_USBC6:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_USBC1:
	case AUX_CH_USBC2:
	case AUX_CH_USBC3:
	case AUX_CH_USBC4:
	case AUX_CH_USBC5:
	case AUX_CH_USBC6:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	if (cpu_latency_qos_request_active(&intel_dp->pm_qos))
		cpu_latency_qos_remove_request(&intel_dp->pm_qos);

	kfree(intel_dp->aux.name);
}

static void
intel_dp_aux_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	enum aux_ch aux_ch = dig_port->aux_ch;

	if (INTEL_GEN(dev_priv) >= 12) {
		intel_dp->aux_ch_ctl_reg = tgl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = tgl_aux_data_reg;
	} else if (INTEL_GEN(dev_priv) >= 9) {
		intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = skl_aux_data_reg;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
	} else {
		intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
	}

	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev_priv))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;

	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;

	drm_dp_aux_init(&intel_dp->aux);

	/* Failure to allocate our preferred name is not critical */
	if (INTEL_GEN(dev_priv) >= 12 && aux_ch >= AUX_CH_USBC1)
		intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX USBC%c/%s",
					       aux_ch - AUX_CH_USBC1 + '1',
					       encoder->base.name);
	else
		intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/%s",
					       aux_ch_name(aux_ch),
					       encoder->base.name);

	intel_dp->aux.transfer = intel_dp_aux_transfer;
	cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
}

bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
{
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 540000;
}

bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
{
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 810000;
}

static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	if (IS_G4X(dev_priv)) {
		divisor = g4x_dpll;
		count = ARRAY_SIZE(g4x_dpll);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (pipe_config->port_clock == divisor[i].clock) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}

static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
		if (r >= len)
			return;
		str += r;
		len -= r;
	}
}

static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	char str[128]; /* FIXME: too big for stack? */

	if (!drm_debug_enabled(DRM_UT_KMS))
		return;

	snprintf_int_array(str, sizeof(str),
			   intel_dp->source_rates, intel_dp->num_source_rates);
	drm_dbg_kms(&i915->drm, "source rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->sink_rates, intel_dp->num_sink_rates);
	drm_dbg_kms(&i915->drm, "sink rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->common_rates, intel_dp->num_common_rates);
	drm_dbg_kms(&i915->drm, "common rates: %s\n", str);
}

int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int len;

	len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
	if (drm_WARN_ON(&i915->drm, len <= 0))
		return 162000;

	return intel_dp->common_rates[len - 1];
}

int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int i = intel_dp_rate_index(intel_dp->sink_rates,
				    intel_dp->num_sink_rates, rate);

	if (drm_WARN_ON(&i915->drm, i < 0))
		i = 0;

	return i;
}

void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
			   u8 *link_bw, u8 *rate_select)
{
	/* eDP 1.4 rate select method. */
	if (intel_dp->use_rate_select) {
		*link_bw = 0;
		*rate_select =
			intel_dp_rate_select(intel_dp, port_clock);
	} else {
		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
		*rate_select = 0;
	}
}
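
/*
 * For illustration: on a sink without eDP 1.4 rate select, a 270000 kHz
 * port clock is reported as link_bw = DP_LINK_BW_2_7 (0x0a) with
 * rate_select = 0; with use_rate_select set, link_bw stays 0 and
 * rate_select becomes the index of 270000 in sink_rates[].
 */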

static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
					 const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/* On TGL, FEC is supported on all Pipes */
	if (INTEL_GEN(dev_priv) >= 12)
		return true;

	if (IS_GEN(dev_priv, 11) && pipe_config->cpu_transcoder != TRANSCODER_A)
		return true;

	return false;
}

static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *pipe_config)
{
	return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
		drm_dp_sink_supports_fec(intel_dp->fec_capable);
}

static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state)
{
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP) && !crtc_state->fec_enable)
		return false;

	return intel_dsc_source_support(crtc_state) &&
		drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
}
static bool intel_dp_hdmi_ycbcr420(struct intel_dp *intel_dp,
				   const struct intel_crtc_state *crtc_state)
{
	return crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
		(crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
		 intel_dp->dfp.ycbcr_444_to_420);
}

static int intel_dp_hdmi_tmds_clock(struct intel_dp *intel_dp,
				    const struct intel_crtc_state *crtc_state, int bpc)
{
	int clock = crtc_state->hw.adjusted_mode.crtc_clock * bpc / 8;

	if (intel_dp_hdmi_ycbcr420(intel_dp, crtc_state))
		clock /= 2;

	return clock;
}

static bool intel_dp_hdmi_tmds_clock_valid(struct intel_dp *intel_dp,
					   const struct intel_crtc_state *crtc_state, int bpc)
{
	int tmds_clock = intel_dp_hdmi_tmds_clock(intel_dp, crtc_state, bpc);

	if (intel_dp->dfp.min_tmds_clock &&
	    tmds_clock < intel_dp->dfp.min_tmds_clock)
		return false;

	if (intel_dp->dfp.max_tmds_clock &&
	    tmds_clock > intel_dp->dfp.max_tmds_clock)
		return false;

	return true;
}

static bool intel_dp_hdmi_deep_color_possible(struct intel_dp *intel_dp,
					      const struct intel_crtc_state *crtc_state,
					      int bpc)
{
	return intel_hdmi_deep_color_possible(crtc_state, bpc,
					      intel_dp->has_hdmi_sink,
					      intel_dp_hdmi_ycbcr420(intel_dp, crtc_state)) &&
		intel_dp_hdmi_tmds_clock_valid(intel_dp, crtc_state, bpc);
}
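/*
 * Maximum link bpp for the pipe, clamped by the downstream facing port's
 * bpc/TMDS clock limits and by a BIOS-provided eDP bpp when the EDID
 * doesn't specify one.
 */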
static int intel_dp_max_bpp(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int bpp, bpc;

	bpc = crtc_state->pipe_bpp / 3;

	if (intel_dp->dfp.max_bpc)
		bpc = min_t(int, bpc, intel_dp->dfp.max_bpc);

	if (intel_dp->dfp.min_tmds_clock) {
		for (; bpc >= 10; bpc -= 2) {
			if (intel_dp_hdmi_deep_color_possible(intel_dp, crtc_state, bpc))
				break;
		}
	}

	bpp = bpc * 3;
	if (intel_dp_is_edp(intel_dp)) {
		/* Get bpp from vbt only for panels that don't have bpp in edid */
		if (intel_connector->base.display_info.bpc == 0 &&
		    dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
			drm_dbg_kms(&dev_priv->drm,
				    "clamping bpp for eDP panel to BIOS-provided %i\n",
				    dev_priv->vbt.edp.bpp);
			bpp = dev_priv->vbt.edp.bpp;
		}
	}

	return bpp;
}
/* Adjust link config limits based on compliance test requests. */
void
intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  struct link_config_limits *limits)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/* For DP Compliance we override the computed bpp for the pipe */
	if (intel_dp->compliance.test_data.bpc != 0) {
		int bpp = 3 * intel_dp->compliance.test_data.bpc;

		limits->min_bpp = limits->max_bpp = bpp;
		pipe_config->dither_force_disable = bpp == 6 * 3;

		drm_dbg_kms(&i915->drm, "Setting pipe_bpp to %d\n", bpp);
	}

	/* Use values requested by Compliance Test Request */
	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
		int index;

		/* Validate the compliance test data since max values
		 * might have changed due to link train fallback.
		 */
		if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
					       intel_dp->compliance.test_lane_count)) {
			index = intel_dp_rate_index(intel_dp->common_rates,
						    intel_dp->num_common_rates,
						    intel_dp->compliance.test_link_rate);
			if (index >= 0)
				limits->min_clock = limits->max_clock = index;
			limits->min_lane_count = limits->max_lane_count =
				intel_dp->compliance.test_lane_count;
		}
	}
}
/* Optimize link config in order: max bpp, min clock, min lanes */
static int
intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  const struct link_config_limits *limits)
{
	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int bpp, clock, lane_count;
	int mode_rate, link_clock, link_avail;

	for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
		int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);

		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   output_bpp);

		for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
			for (lane_count = limits->min_lane_count;
			     lane_count <= limits->max_lane_count;
			     lane_count <<= 1) {
				link_clock = intel_dp->common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					pipe_config->lane_count = lane_count;
					pipe_config->pipe_bpp = bpp;
					pipe_config->port_clock = link_clock;

					return 0;
				}
			}
		}
	}

	return -EINVAL;
}
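/*
 * Pick the highest sink-supported DSC input bpc that does not exceed
 * dsc_max_bpc, returned as bpp (3 components).
 */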
static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
{
	int i, num_bpc;
	u8 dsc_bpc[3] = {0};

	num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd,
						       dsc_bpc);
	for (i = 0; i < num_bpc; i++) {
		if (dsc_max_bpc >= dsc_bpc[i])
			return dsc_bpc[i] * 3;
	}

	return 0;
}
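/*
 * Derive the DSC encoder config (slice height, DSC version, line buffer
 * depth, block prediction) from the sink's DSC DPCD and compute the rate
 * control parameters.
 */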
#define DSC_SUPPORTED_VERSION_MIN		1

static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
				       struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
	u8 line_buf_depth;
	int ret;

	ret = intel_dsc_compute_params(encoder, crtc_state);
	if (ret)
		return ret;

	/*
	 * Slice Height of 8 works for all currently available panels. So start
	 * with that if pic_height is an integral multiple of 8. Eventually add
	 * logic to try multiple slice heights.
	 */
	if (vdsc_cfg->pic_height % 8 == 0)
		vdsc_cfg->slice_height = 8;
	else if (vdsc_cfg->pic_height % 4 == 0)
		vdsc_cfg->slice_height = 4;
	else
		vdsc_cfg->slice_height = 2;

	vdsc_cfg->dsc_version_major =
		(intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
		 DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
	vdsc_cfg->dsc_version_minor =
		min(DSC_SUPPORTED_VERSION_MIN,
		    (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
		     DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT);

	vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
		DP_DSC_RGB;

	line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd);
	if (!line_buf_depth) {
		drm_dbg_kms(&i915->drm,
			    "DSC Sink Line Buffer Depth invalid\n");
		return -EINVAL;
	}

	if (vdsc_cfg->dsc_version_minor == 2)
		vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ?
			DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth;
	else
		vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ?
			DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth;

	vdsc_cfg->block_pred_enable =
		intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] &
		DP_DSC_BLK_PREDICTION_IS_SUPPORTED;

	return drm_dsc_compute_rc_parameters(vdsc_cfg);
}
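/*
 * Compute a DSC-enabled link config: pick max bpp/link rate/lane count,
 * derive the compressed bpp and slice count from the sink caps, and split
 * the stream over two VDSC engines when a single one can't keep up.
 */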
static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
				       struct intel_crtc_state *pipe_config,
				       struct drm_connector_state *conn_state,
				       struct link_config_limits *limits)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const struct drm_display_mode *adjusted_mode =
		&pipe_config->hw.adjusted_mode;
	u8 dsc_max_bpc;
	int pipe_bpp;
	int ret;

	pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
		intel_dp_supports_fec(intel_dp, pipe_config);

	if (!intel_dp_supports_dsc(intel_dp, pipe_config))
		return -EINVAL;

	/* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
	if (INTEL_GEN(dev_priv) >= 12)
		dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc);
	else
		dsc_max_bpc = min_t(u8, 10,
				    conn_state->max_requested_bpc);

	pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc);

	/* Min Input BPC for ICL+ is 8 */
	if (pipe_bpp < 8 * 3) {
		drm_dbg_kms(&dev_priv->drm,
			    "No DSC support for less than 8bpc\n");
		return -EINVAL;
	}

	/*
	 * For now enable DSC for max bpp, max link rate, max lane count.
	 * Optimize this later for the minimum possible link rate/lane count
	 * with DSC enabled for the requested mode.
	 */
	pipe_config->pipe_bpp = pipe_bpp;
	pipe_config->port_clock = intel_dp->common_rates[limits->max_clock];
	pipe_config->lane_count = limits->max_lane_count;

	if (intel_dp_is_edp(intel_dp)) {
		pipe_config->dsc.compressed_bpp =
			min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
			      pipe_config->pipe_bpp);
		pipe_config->dsc.slice_count =
			drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
							true);
	} else {
		u16 dsc_max_output_bpp;
		u8 dsc_dp_slice_count;

		dsc_max_output_bpp =
			intel_dp_dsc_get_output_bpp(dev_priv,
						    pipe_config->port_clock,
						    pipe_config->lane_count,
						    adjusted_mode->crtc_clock,
						    adjusted_mode->crtc_hdisplay,
						    pipe_config->bigjoiner);
		dsc_dp_slice_count =
			intel_dp_dsc_get_slice_count(intel_dp,
						     adjusted_mode->crtc_clock,
						     adjusted_mode->crtc_hdisplay,
						     pipe_config->bigjoiner);
		if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
			drm_dbg_kms(&dev_priv->drm,
				    "Compressed BPP/Slice Count not supported\n");
			return -EINVAL;
		}
		pipe_config->dsc.compressed_bpp = min_t(u16,
							dsc_max_output_bpp >> 4,
							pipe_config->pipe_bpp);
		pipe_config->dsc.slice_count = dsc_dp_slice_count;
	}

	/*
	 * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
	 * is greater than the maximum Cdclock and if slice count is even
	 * then we need to use 2 VDSC instances.
	 */
	if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq ||
	    pipe_config->bigjoiner) {
		if (pipe_config->dsc.slice_count < 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "Cannot split stream to use 2 VDSC instances\n");
			return -EINVAL;
		}

		pipe_config->dsc.dsc_split = true;
	}

	ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config);
	if (ret < 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Cannot compute valid DSC parameters for Input Bpp = %d "
			    "Compressed BPP = %d\n",
			    pipe_config->pipe_bpp,
			    pipe_config->dsc.compressed_bpp);
		return ret;
	}

	pipe_config->dsc.compression_enable = true;
	drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d "
		    "Compressed Bpp = %d Slice Count = %d\n",
		    pipe_config->pipe_bpp,
		    pipe_config->dsc.compressed_bpp,
		    pipe_config->dsc.slice_count);

	return 0;
}
static int
intel_dp_compute_link_config(struct intel_encoder *encoder,
			     struct intel_crtc_state *pipe_config,
			     struct drm_connector_state *conn_state)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&pipe_config->hw.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct link_config_limits limits;
	int common_len;
	int ret;

	common_len = intel_dp_common_len_rate_limit(intel_dp,
						    intel_dp->max_link_rate);

	/* No common link rates between source and sink */
	drm_WARN_ON(encoder->base.dev, common_len <= 0);

	limits.min_clock = 0;
	limits.max_clock = common_len - 1;

	limits.min_lane_count = 1;
	limits.max_lane_count = intel_dp_max_lane_count(intel_dp);

	limits.min_bpp = intel_dp_min_bpp(pipe_config->output_format);
	limits.max_bpp = intel_dp_max_bpp(intel_dp, pipe_config);

	if (intel_dp_is_edp(intel_dp)) {
		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertises being capable of. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically these values correspond to the
		 * native resolution of the panel.
		 */
		limits.min_lane_count = limits.max_lane_count;
		limits.min_clock = limits.max_clock;
	}

	intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);

	drm_dbg_kms(&i915->drm, "DP link computation with max lane count %i "
		    "max rate %d max bpp %d pixel clock %iKHz\n",
		    limits.max_lane_count,
		    intel_dp->common_rates[limits.max_clock],
		    limits.max_bpp, adjusted_mode->crtc_clock);

	if ((adjusted_mode->crtc_clock > i915->max_dotclk_freq ||
	     adjusted_mode->crtc_hdisplay > 5120) &&
	    intel_dp_can_bigjoiner(intel_dp))
		pipe_config->bigjoiner = true;

	/*
	 * Optimize for slow and wide. This is the place to add alternative
	 * optimization policy.
	 */
	ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);

	/* enable compression if the mode doesn't fit available BW */
	drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en);
	if (ret || intel_dp->force_dsc_en || pipe_config->bigjoiner) {
		ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
						  conn_state, &limits);
		if (ret < 0)
			return ret;
	}

	if (pipe_config->dsc.compression_enable) {
		drm_dbg_kms(&i915->drm,
			    "DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
			    pipe_config->lane_count, pipe_config->port_clock,
			    pipe_config->pipe_bpp,
			    pipe_config->dsc.compressed_bpp);

		drm_dbg_kms(&i915->drm,
			    "DP link rate required %i available %i\n",
			    intel_dp_link_required(adjusted_mode->crtc_clock,
						   pipe_config->dsc.compressed_bpp),
			    intel_dp_max_data_rate(pipe_config->port_clock,
						   pipe_config->lane_count));
	} else {
		drm_dbg_kms(&i915->drm, "DP lane count %d clock %d bpp %d\n",
			    pipe_config->lane_count, pipe_config->port_clock,
			    pipe_config->pipe_bpp);

		drm_dbg_kms(&i915->drm,
			    "DP link rate required %i available %i\n",
			    intel_dp_link_required(adjusted_mode->crtc_clock,
						   pipe_config->pipe_bpp),
			    intel_dp_max_data_rate(pipe_config->port_clock,
						   pipe_config->lane_count));
	}

	return 0;
}
bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
				  const struct drm_connector_state *conn_state)
{
	const struct intel_digital_connector_state *intel_conn_state =
		to_intel_digital_connector_state(conn_state);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	/*
	 * Our YCbCr output is always limited range.
	 * crtc_state->limited_color_range only applies to RGB,
	 * and it must never be set for YCbCr or we risk setting
	 * some conflicting bits in PIPECONF which will mess up
	 * the colors on the monitor.
	 */
	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		return false;

	if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		return crtc_state->pipe_bpp != 18 &&
			drm_default_rgb_quant_range(adjusted_mode) ==
			HDMI_QUANTIZATION_RANGE_LIMITED;
	} else {
		return intel_conn_state->broadcast_rgb ==
			INTEL_BROADCAST_RGB_LIMITED;
	}
}
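/* g4x lacks DP audio entirely, and port A has no audio before TGL. */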
static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv,
				    enum port port)
{
	if (IS_G4X(dev_priv))
		return false;
	if (INTEL_GEN(dev_priv) < 12 && port == PORT_A)
		return false;

	return true;
}
static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc_state,
					     const struct drm_connector_state *conn_state,
					     struct drm_dp_vsc_sdp *vsc)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
	 * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
	 * Colorimetry Format indication.
	 */
	vsc->revision = 0x5;
	vsc->length = 0x13;

	/* DP 1.4a spec, Table 2-120 */
	switch (crtc_state->output_format) {
	case INTEL_OUTPUT_FORMAT_YCBCR444:
		vsc->pixelformat = DP_PIXELFORMAT_YUV444;
		break;
	case INTEL_OUTPUT_FORMAT_YCBCR420:
		vsc->pixelformat = DP_PIXELFORMAT_YUV420;
		break;
	case INTEL_OUTPUT_FORMAT_RGB:
	default:
		vsc->pixelformat = DP_PIXELFORMAT_RGB;
	}

	switch (conn_state->colorspace) {
	case DRM_MODE_COLORIMETRY_BT709_YCC:
		vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
		break;
	case DRM_MODE_COLORIMETRY_XVYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_XVYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_XVYCC_709:
		vsc->colorimetry = DP_COLORIMETRY_XVYCC_709;
		break;
	case DRM_MODE_COLORIMETRY_SYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_SYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_OPYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_OPYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_CYCC:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_CYCC;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_RGB:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_RGB;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_YCC:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_YCC;
		break;
	case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65:
	case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER:
		vsc->colorimetry = DP_COLORIMETRY_DCI_P3_RGB;
		break;
	default:
		/*
		 * RGB->YCBCR color conversion uses the BT.709
		 * color space.
		 */
		if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
			vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
		else
			vsc->colorimetry = DP_COLORIMETRY_DEFAULT;
		break;
	}

	vsc->bpc = crtc_state->pipe_bpp / 3;

	/* only RGB pixelformat supports 6 bpc */
	drm_WARN_ON(&dev_priv->drm,
		    vsc->bpc == 6 && vsc->pixelformat != DP_PIXELFORMAT_RGB);

	/* all YCbCr are always limited range */
	vsc->dynamic_range = DP_DYNAMIC_RANGE_CTA;
	vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED;
}
static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp,
				     struct intel_crtc_state *crtc_state,
				     const struct drm_connector_state *conn_state)
{
	struct drm_dp_vsc_sdp *vsc = &crtc_state->infoframes.vsc;

	/* When a crtc state has PSR, VSC SDP will be handled by PSR routine */
	if (crtc_state->has_psr)
		return;

	if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state))
		return;

	crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
	vsc->sdp_type = DP_SDP_VSC;
	intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
					 &crtc_state->infoframes.vsc);
}
void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state,
				  const struct drm_connector_state *conn_state,
				  struct drm_dp_vsc_sdp *vsc)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	vsc->sdp_type = DP_SDP_VSC;

	if (dev_priv->psr.psr2_enabled) {
		if (dev_priv->psr.colorimetry_support &&
		    intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
			/* [PSR2, +Colorimetry] */
			intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
							 vsc);
		} else {
			/*
			 * [PSR2, -Colorimetry]
			 * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11
			 * 3D stereo + PSR/PSR2 + Y-coordinate.
			 */
			vsc->revision = 0x4;
			vsc->length = 0xe;
		}
	} else {
		/*
		 * [PSR1]
		 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
		 * VSC SDP supporting 3D stereo + PSR (applies to eDP v1.3 or
		 * higher).
		 */
		vsc->revision = 0x2;
		vsc->length = 0x8;
	}
}
static void
intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
					    struct intel_crtc_state *crtc_state,
					    const struct drm_connector_state *conn_state)
{
	int ret;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct hdmi_drm_infoframe *drm_infoframe = &crtc_state->infoframes.drm.drm;

	if (!conn_state->hdr_output_metadata)
		return;

	ret = drm_hdmi_infoframe_set_hdr_metadata(drm_infoframe, conn_state);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm, "couldn't set HDR metadata in infoframe\n");
		return;
	}

	crtc_state->infoframes.enable |=
		intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA);
}
static void
intel_dp_drrs_compute_config(struct intel_dp *intel_dp,
			     struct intel_crtc_state *pipe_config,
			     int output_bpp, bool constant_n)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/*
	 * DRRS and PSR can't be enabled together, so give preference to PSR
	 * as it allows more power savings by completely shutting down the
	 * display. To guarantee this, intel_dp_drrs_compute_config() must be
	 * called after intel_psr_compute_config().
	 */
	if (pipe_config->has_psr)
		return;

	if (!intel_connector->panel.downclock_mode ||
	    dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT)
		return;

	pipe_config->has_drrs = true;
	intel_link_compute_m_n(output_bpp, pipe_config->lane_count,
			       intel_connector->panel.downclock_mode->clock,
			       pipe_config->port_clock, &pipe_config->dp_m2_n2,
			       constant_n, pipe_config->fec_enable);
}
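/*
 * Compute the full DP crtc state: link config, audio, color range, M/N
 * values, and the PSR/DRRS/SDP infoframe state.
 */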
int
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config,
			struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	enum port port = encoder->port;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct intel_digital_connector_state *intel_conn_state =
		to_intel_digital_connector_state(conn_state);
	bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0,
					   DP_DPCD_QUIRK_CONSTANT_N);
	int ret = 0, output_bpp;

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->output_format = intel_dp_output_format(&intel_connector->base,
							    adjusted_mode);

	if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
		ret = intel_pch_panel_fitting(pipe_config, conn_state);
		if (ret)
			return ret;
	}

	if (!intel_dp_port_has_audio(dev_priv, port))
		pipe_config->has_audio = false;
	else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
		pipe_config->has_audio = intel_dp->has_audio;
	else
		pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;

	if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);

		if (HAS_GMCH(dev_priv))
			ret = intel_gmch_panel_fitting(pipe_config, conn_state);
		else
			ret = intel_pch_panel_fitting(pipe_config, conn_state);
		if (ret)
			return ret;
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return -EINVAL;

	if (HAS_GMCH(dev_priv) &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
		return -EINVAL;

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return -EINVAL;

	if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay))
		return -EINVAL;

	ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
	if (ret < 0)
		return ret;

	pipe_config->limited_color_range =
		intel_dp_limited_color_range(pipe_config, conn_state);

	if (pipe_config->dsc.compression_enable)
		output_bpp = pipe_config->dsc.compressed_bpp;
	else
		output_bpp = intel_dp_output_bpp(pipe_config->output_format,
						 pipe_config->pipe_bpp);

	intel_link_compute_m_n(output_bpp,
			       pipe_config->lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n,
			       constant_n, pipe_config->fec_enable);

	if (!HAS_DDI(dev_priv))
		intel_dp_set_clock(encoder, pipe_config);

	intel_psr_compute_config(intel_dp, pipe_config);
	intel_dp_drrs_compute_config(intel_dp, pipe_config, output_bpp,
				     constant_n);
	intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
	intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state);

	return 0;
}
void intel_dp_set_link_params(struct intel_dp *intel_dp,
			      int link_rate, int lane_count)
{
	intel_dp->link_trained = false;
	intel_dp->link_rate = link_rate;
	intel_dp->lane_count = lane_count;
}
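/*
 * Build the initial DP port register value for the upcoming modeset; the
 * port itself is not enabled here.
 */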
static void intel_dp_prepare(struct intel_encoder *encoder,
			     const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	enum port port = encoder->port;
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;

	intel_dp_set_link_params(intel_dp,
				 pipe_config->port_clock,
				 pipe_config->lane_count);

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ilk_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);

	/* Split out the IBX/CPU vs CPT settings */

	if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
	} else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
		u32 trans_dp;

		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		trans_dp = intel_de_read(dev_priv, TRANS_DP_CTL(crtc->pipe));
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			trans_dp |= TRANS_DP_ENH_FRAMING;
		else
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
		intel_de_write(dev_priv, TRANS_DP_CTL(crtc->pipe), trans_dp);
	} else {
		if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
			intel_dp->DP |= DP_COLOR_RANGE_16_235;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (IS_CHERRYVIEW(dev_priv))
			intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe);
		else
			intel_dp->DP |= DP_PIPE_SEL(crtc->pipe);
	}
}

#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE		(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
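/* Poll PP_STATUS until the bits selected by @mask read back as @value. */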
static void intel_pps_verify_state(struct intel_dp *intel_dp);

static void wait_panel_status(struct intel_dp *intel_dp,
			      u32 mask,
			      u32 value)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	intel_pps_verify_state(intel_dp);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	drm_dbg_kms(&dev_priv->drm,
		    "mask %08x value %08x status %08x control %08x\n",
		    mask, value,
		    intel_de_read(dev_priv, pp_stat_reg),
		    intel_de_read(dev_priv, pp_ctrl_reg));

	if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
				       mask, value, 5000))
		drm_err(&dev_priv->drm,
			"Panel status timeout: status %08x control %08x\n",
			intel_de_read(dev_priv, pp_stat_reg),
			intel_de_read(dev_priv, pp_ctrl_reg));

	drm_dbg_kms(&dev_priv->drm, "Wait complete\n");
}
static void wait_panel_on(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	drm_dbg_kms(&i915->drm, "Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}

static void wait_panel_off(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	drm_dbg_kms(&i915->drm, "Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}

static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	ktime_t panel_power_on_time;
	s64 panel_power_off_duration;

	drm_dbg_kms(&i915->drm, "Wait for panel power cycle\n");

	/* take the difference of current time and panel power off time
	 * and then make panel wait for t11_t12 if needed. */
	panel_power_on_time = ktime_get_boottime();
	panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
		wait_remaining_ms_from_jiffies(jiffies,
			intel_dp->panel_power_cycle_delay - panel_power_off_duration);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}

static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}
/* Read the current pp_control value, unlocking the register if it
 * is locked
 */
static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 control;

	lockdep_assert_held(&dev_priv->pps_mutex);

	control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp));
	if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) &&
			(control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
		control &= ~PANEL_UNLOCK_MASK;
		control |= PANEL_UNLOCK_REGS;
	}
	return control;
}
/*
 * Must be paired with edp_panel_vdd_off().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 */
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;
	bool need_to_disable = !intel_dp->want_panel_vdd;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!intel_dp_is_edp(intel_dp))
		return false;

	cancel_delayed_work(&intel_dp->panel_vdd_work);
	intel_dp->want_panel_vdd = true;

	if (edp_have_panel_vdd(intel_dp))
		return need_to_disable;

	intel_display_power_get(dev_priv,
				intel_aux_power_domain(dig_port));

	drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name);

	if (!edp_have_panel_power(intel_dp))
		wait_panel_power_cycle(intel_dp);

	pp = ilk_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	intel_de_write(dev_priv, pp_ctrl_reg, pp);
	intel_de_posting_read(dev_priv, pp_ctrl_reg);
	drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		    intel_de_read(dev_priv, pp_stat_reg),
		    intel_de_read(dev_priv, pp_ctrl_reg));

	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!edp_have_panel_power(intel_dp)) {
		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] panel power wasn't enabled\n",
			    dig_port->base.base.base.id,
			    dig_port->base.base.name);
		msleep(intel_dp->panel_power_up_delay);
	}

	return need_to_disable;
}
/*
 * Must be paired with intel_edp_panel_vdd_off() or
 * intel_edp_panel_off().
 * Nested calls to these functions are not allowed since
 * we drop the lock. Caller must use some higher level
 * locking to prevent nested calls from other threads.
 */
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;
	bool vdd;

	if (!intel_dp_is_edp(intel_dp))
		return;

	vdd = false;
	with_pps_lock(intel_dp, wakeref)
		vdd = edp_panel_vdd_on(intel_dp);
	I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n",
			dp_to_dig_port(intel_dp)->base.base.base.id,
			dp_to_dig_port(intel_dp)->base.base.name);
}
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port =
		dp_to_dig_port(intel_dp);
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	drm_WARN_ON(&dev_priv->drm, intel_dp->want_panel_vdd);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name);

	pp = ilk_get_pp_control(intel_dp);
	pp &= ~EDP_FORCE_VDD;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp_stat_reg = _pp_stat_reg(intel_dp);

	intel_de_write(dev_priv, pp_ctrl_reg, pp);
	intel_de_posting_read(dev_priv, pp_ctrl_reg);

	/* Make sure sequencer is idle before allowing subsequent activity */
	drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		    intel_de_read(dev_priv, pp_stat_reg),
		    intel_de_read(dev_priv, pp_ctrl_reg));

	if ((pp & PANEL_POWER_ON) == 0)
		intel_dp->panel_power_off_time = ktime_get_boottime();

	intel_display_power_put_unchecked(dev_priv,
					  intel_aux_power_domain(dig_port));
}
static void edp_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp =
		container_of(to_delayed_work(__work),
			     struct intel_dp, panel_vdd_work);
	intel_wakeref_t wakeref;

	with_pps_lock(intel_dp, wakeref) {
		if (!intel_dp->want_panel_vdd)
			edp_panel_vdd_off_sync(intel_dp);
	}
}

static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
	unsigned long delay;

	/*
	 * Queue the timer to fire a long time from now (relative to the power
	 * down delay) to keep the panel power up across a sequence of
	 * operations.
	 */
	delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
	schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
}
/*
 * Must be paired with edp_panel_vdd_on().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 */
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!intel_dp_is_edp(intel_dp))
		return;

	I915_STATE_WARN(!intel_dp->want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on",
			dp_to_dig_port(intel_dp)->base.base.base.id,
			dp_to_dig_port(intel_dp)->base.base.name);

	intel_dp->want_panel_vdd = false;

	if (sync)
		edp_panel_vdd_off_sync(intel_dp);
	else
		edp_panel_vdd_schedule_off(intel_dp);
}
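/* Turn the panel power on via the PPS; caller must hold pps_mutex. */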
static void edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power on\n",
		    dp_to_dig_port(intel_dp)->base.base.base.id,
		    dp_to_dig_port(intel_dp)->base.base.name);

	if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp),
		     "[ENCODER:%d:%s] panel power already on\n",
		     dp_to_dig_port(intel_dp)->base.base.base.id,
		     dp_to_dig_port(intel_dp)->base.base.name))
		return;

	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ilk_get_pp_control(intel_dp);
	if (IS_GEN(dev_priv, 5)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		intel_de_write(dev_priv, pp_ctrl_reg, pp);
		intel_de_posting_read(dev_priv, pp_ctrl_reg);
	}

	pp |= PANEL_POWER_ON;
	if (!IS_GEN(dev_priv, 5))
		pp |= PANEL_POWER_RESET;

	intel_de_write(dev_priv, pp_ctrl_reg, pp);
	intel_de_posting_read(dev_priv, pp_ctrl_reg);

	wait_panel_on(intel_dp);
	intel_dp->last_power_on = jiffies;

	if (IS_GEN(dev_priv, 5)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		intel_de_write(dev_priv, pp_ctrl_reg, pp);
		intel_de_posting_read(dev_priv, pp_ctrl_reg);
	}
}
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_pps_lock(intel_dp, wakeref)
		edp_panel_on(intel_dp);
}
static void edp_panel_off(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power off\n",
		    dig_port->base.base.base.id, dig_port->base.base.name);

	drm_WARN(&dev_priv->drm, !intel_dp->want_panel_vdd,
		 "Need [ENCODER:%d:%s] VDD to turn off panel\n",
		 dig_port->base.base.base.id, dig_port->base.base.name);

	pp = ilk_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	intel_dp->want_panel_vdd = false;

	intel_de_write(dev_priv, pp_ctrl_reg, pp);
	intel_de_posting_read(dev_priv, pp_ctrl_reg);

	wait_panel_off(intel_dp);
	intel_dp->panel_power_off_time = ktime_get_boottime();

	/* We got a reference when we enabled the VDD. */
	intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port));
}

void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_pps_lock(intel_dp, wakeref)
		edp_panel_off(intel_dp);
}
/* Enable backlight in the panel power control. */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link. So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	wait_backlight_on(intel_dp);

	with_pps_lock(intel_dp, wakeref) {
		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
		u32 pp;

		pp = ilk_get_pp_control(intel_dp);
		pp |= EDP_BLC_ENABLE;

		intel_de_write(dev_priv, pp_ctrl_reg, pp);
		intel_de_posting_read(dev_priv, pp_ctrl_reg);
	}
}

/* Enable backlight PWM and backlight PP control. */
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
			    const struct drm_connector_state *conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder));
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(&i915->drm, "\n");

	intel_panel_enable_backlight(crtc_state, conn_state);
	_intel_edp_backlight_on(intel_dp);
}
/* Disable backlight in the panel power control. */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_pps_lock(intel_dp, wakeref) {
		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
		u32 pp;

		pp = ilk_get_pp_control(intel_dp);
		pp &= ~EDP_BLC_ENABLE;

		intel_de_write(dev_priv, pp_ctrl_reg, pp);
		intel_de_posting_read(dev_priv, pp_ctrl_reg);
	}

	intel_dp->last_backlight_off = jiffies;
	edp_wait_backlight_off(intel_dp);
}

/* Disable backlight PP control and backlight PWM. */
void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder));
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(&i915->drm, "\n");

	_intel_edp_backlight_off(intel_dp);
	intel_panel_disable_backlight(old_conn_state);
}
/*
 * Hook for controlling the panel power control backlight through the bl_power
 * sysfs attribute. Take care to handle multiple calls.
 */
static void intel_edp_backlight_power(struct intel_connector *connector,
				      bool enable)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	intel_wakeref_t wakeref;
	bool is_enabled;

	is_enabled = false;
	with_pps_lock(intel_dp, wakeref)
		is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
	if (is_enabled == enable)
		return;

	drm_dbg_kms(&i915->drm, "panel power control backlight %s\n",
		    enable ? "enable" : "disable");

	if (enable)
		_intel_edp_backlight_on(intel_dp);
	else
		_intel_edp_backlight_off(intel_dp);
}
static void assert_dp_port(struct intel_dp *intel_dp, bool state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	bool cur_state = intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN;

	I915_STATE_WARN(cur_state != state,
			"[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n",
			dig_port->base.base.base.id, dig_port->base.base.name,
			onoff(state), onoff(cur_state));
}
#define assert_dp_port_disabled(d) assert_dp_port((d), false)

static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
{
	bool cur_state = intel_de_read(dev_priv, DP_A) & DP_PLL_ENABLE;

	I915_STATE_WARN(cur_state != state,
			"eDP PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
static void ilk_edp_pll_on(struct intel_dp *intel_dp,
			   const struct intel_crtc_state *pipe_config)
{
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_disabled(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "enabling eDP PLL for clock %d\n",
		    pipe_config->port_clock);

	intel_dp->DP &= ~DP_PLL_FREQ_MASK;

	if (pipe_config->port_clock == 162000)
		intel_dp->DP |= DP_PLL_FREQ_162MHZ;
	else
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;

	intel_de_write(dev_priv, DP_A, intel_dp->DP);
	intel_de_posting_read(dev_priv, DP_A);
	udelay(500);

	/*
	 * [DevILK] Work around required when enabling DP PLL
	 * while a pipe is enabled going to FDI:
	 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
	 * 2. Program DP PLL enable
	 */
	if (IS_GEN(dev_priv, 5))
		intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);

	intel_dp->DP |= DP_PLL_ENABLE;

	intel_de_write(dev_priv, DP_A, intel_dp->DP);
	intel_de_posting_read(dev_priv, DP_A);
	udelay(200);
}
static void ilk_edp_pll_off(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_enabled(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "disabling eDP PLL\n");

	intel_dp->DP &= ~DP_PLL_ENABLE;

	intel_de_write(dev_priv, DP_A, intel_dp->DP);
	intel_de_posting_read(dev_priv, DP_A);
	udelay(200);
}
static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
{
	/*
	 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
	 * be capable of signalling downstream hpd with a long pulse.
	 * Whether or not that means D3 is safe to use is not clear,
	 * but let's assume so until proven otherwise.
	 *
	 * FIXME should really check all downstream ports...
	 */
	return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
		drm_dp_is_branch(intel_dp->dpcd) &&
		intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
}
void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
					   const struct intel_crtc_state *crtc_state,
					   bool enable)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int ret;

	if (!crtc_state->dsc.compression_enable)
		return;

	ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
				 enable ? DP_DECOMPRESSION_EN : 0);
	if (ret < 0)
		drm_dbg_kms(&i915->drm,
			    "Failed to %s sink decompression state\n",
			    enable ? "enable" : "disable");
}
/* If the device supports it, try to set the power state appropriately */
void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DP_SET_POWER_D0) {
		if (downstream_hpd_needs_d0(intel_dp))
			return;

		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode);
	} else {
		struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);

		lspcon_resume(dp_to_dig_port(intel_dp));

		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode);
			if (ret == 1)
				break;
			msleep(1);
		}

		if (ret == 1 && lspcon->active)
			lspcon_wait_pcon_mode(lspcon);
	}

	if (ret != 1)
		drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Set power to %s failed\n",
			    encoder->base.base.id, encoder->base.name,
			    mode == DP_SET_POWER_D0 ? "D0" : "D3");
}
static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
				 enum port port, enum pipe *pipe)
{
	enum pipe p;

	for_each_pipe(dev_priv, p) {
		u32 val = intel_de_read(dev_priv, TRANS_DP_CTL(p));

		if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) {
			*pipe = p;
			return true;
		}
	}

	drm_dbg_kms(&dev_priv->drm, "No pipe for DP port %c found\n",
		    port_name(port));

	/* must initialize pipe to something for the asserts */
	*pipe = PIPE_A;

	return false;
}
bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
			   i915_reg_t dp_reg, enum port port,
			   enum pipe *pipe)
{
	bool ret;
	u32 val;

	val = intel_de_read(dev_priv, dp_reg);

	ret = val & DP_PORT_EN;

	/* asserts want to know the pipe even if the port is disabled */
	if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
		*pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB;
	else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
		ret &= cpt_dp_port_selected(dev_priv, port, pipe);
	else if (IS_CHERRYVIEW(dev_priv))
		*pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV;
	else
		*pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT;

	return ret;
}
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	intel_wakeref_t wakeref;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     encoder->power_domain);
	if (!wakeref)
		return false;

	ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
				    encoder->port, pipe);

	intel_display_power_put(dev_priv, encoder->power_domain, wakeref);

	return ret;
}
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	u32 tmp, flags = 0;
	enum port port = encoder->port;
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);

	if (encoder->type == INTEL_OUTPUT_EDP)
		pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
	else
		pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);

	tmp = intel_de_read(dev_priv, intel_dp->output_reg);

	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

	if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
		u32 trans_dp = intel_de_read(dev_priv,
					     TRANS_DP_CTL(crtc->pipe));

		if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->hw.adjusted_mode.flags |= flags;

	if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
		pipe_config->limited_color_range = true;

	pipe_config->lane_count =
		((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;

	intel_dp_get_m_n(crtc, pipe_config);

	if (port == PORT_A) {
		if ((intel_de_read(dev_priv, DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	pipe_config->hw.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(pipe_config->port_clock,
					 &pipe_config->dp_m_n);

	if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		drm_dbg_kms(&dev_priv->drm,
			    "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			    pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
		dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
	}
}
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp);

/**
 * intel_dp_sync_state - sync the encoder state during init/resume
 * @encoder: intel encoder to sync
 * @crtc_state: state for the CRTC connected to the encoder
 *
 * Sync any state stored in the encoder wrt. HW state during driver init
 * and system resume.
 */
void intel_dp_sync_state(struct intel_encoder *encoder,
			 const struct intel_crtc_state *crtc_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

	/*
	 * Don't clobber DPCD if it's been already read out during output
	 * setup (eDP) or detect.
	 */
	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		intel_dp_get_dpcd(intel_dp);

	intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
	intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
}
bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
				    struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

	/*
	 * If BIOS has set an unsupported or non-standard link rate for some
	 * reason force an encoder recompute and full modeset.
	 */
	if (intel_dp_rate_index(intel_dp->source_rates, intel_dp->num_source_rates,
				crtc_state->port_clock) < 0) {
		drm_dbg_kms(&i915->drm, "Forcing full modeset due to unsupported link rate\n");
		crtc_state->uapi.connectors_changed = true;
		return false;
	}

	/*
	 * FIXME hack to force full modeset when DSC is being used.
	 *
	 * As long as we do not have full state readout and config comparison
	 * of crtc_state->dsc, we have no way to ensure reliable fastset.
	 * Remove once we have readout for DSC.
	 */
	if (crtc_state->dsc.compression_enable) {
		drm_dbg_kms(&i915->drm, "Forcing full modeset due to DSC being enabled\n");
		crtc_state->uapi.mode_changed = true;
		return false;
	}

	if (CAN_PSR(i915) && intel_dp_is_edp(intel_dp)) {
		drm_dbg_kms(&i915->drm, "Forcing full modeset to compute PSR state\n");
		crtc_state->uapi.mode_changed = true;
		return false;
	}

	return true;
}
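/*
 * Common DP disable path: tear down audio, then the backlight and panel
 * power (with VDD forced on), and put the sink into D3.
 */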
static void intel_disable_dp(struct intel_atomic_state *state,
			     struct intel_encoder *encoder,
			     const struct intel_crtc_state *old_crtc_state,
			     const struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

	intel_dp->link_trained = false;

	if (old_crtc_state->has_audio)
		intel_audio_codec_disable(encoder,
					  old_crtc_state, old_conn_state);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	intel_edp_panel_vdd_on(intel_dp);
	intel_edp_backlight_off(old_conn_state);
	intel_dp_set_power(intel_dp, DP_SET_POWER_D3);
	intel_edp_panel_off(intel_dp);
}
static void g4x_disable_dp(struct intel_atomic_state *state,
			   struct intel_encoder *encoder,
			   const struct intel_crtc_state *old_crtc_state,
			   const struct drm_connector_state *old_conn_state)
{
	intel_disable_dp(state, encoder, old_crtc_state, old_conn_state);
}

static void vlv_disable_dp(struct intel_atomic_state *state,
			   struct intel_encoder *encoder,
			   const struct intel_crtc_state *old_crtc_state,
			   const struct drm_connector_state *old_conn_state)
{
	intel_disable_dp(state, encoder, old_crtc_state, old_conn_state);
}
static void g4x_post_disable_dp(struct intel_atomic_state *state,
				struct intel_encoder *encoder,
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	enum port port = encoder->port;

	/*
	 * Bspec does not list a specific disable sequence for g4x DP.
	 * Follow the ilk+ sequence (disable pipe before the port) for
	 * g4x DP as it does not suffer from underruns like the normal
	 * g4x modeset sequence (disable pipe after the port).
	 */
	intel_dp_link_down(encoder, old_crtc_state);

	/* Only ilk+ has port A */
	if (port == PORT_A)
		ilk_edp_pll_off(intel_dp, old_crtc_state);
}

static void vlv_post_disable_dp(struct intel_atomic_state *state,
				struct intel_encoder *encoder,
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state)
{
	intel_dp_link_down(encoder, old_crtc_state);
}

static void chv_post_disable_dp(struct intel_atomic_state *state,
				struct intel_encoder *encoder,
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	intel_dp_link_down(encoder, old_crtc_state);

	vlv_dpio_get(dev_priv);

	/* Assert data lane reset */
	chv_data_lane_soft_reset(encoder, old_crtc_state, true);

	vlv_dpio_put(dev_priv);
}
static void
cpt_set_link_train(struct intel_dp *intel_dp,
		   const struct intel_crtc_state *crtc_state,
		   u8 dp_train_pat)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 *DP = &intel_dp->DP;

	*DP &= ~DP_LINK_TRAIN_MASK_CPT;

	switch (intel_dp_training_pattern_symbol(dp_train_pat)) {
	case DP_TRAINING_PATTERN_DISABLE:
		*DP |= DP_LINK_TRAIN_OFF_CPT;
		break;
	case DP_TRAINING_PATTERN_1:
		*DP |= DP_LINK_TRAIN_PAT_1_CPT;
		break;
	case DP_TRAINING_PATTERN_2:
		*DP |= DP_LINK_TRAIN_PAT_2_CPT;
		break;
	case DP_TRAINING_PATTERN_3:
		drm_dbg_kms(&dev_priv->drm,
			    "TPS3 not supported, using TPS2 instead\n");
		*DP |= DP_LINK_TRAIN_PAT_2_CPT;
		break;
	}

	intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);
}

static void
g4x_set_link_train(struct intel_dp *intel_dp,
		   const struct intel_crtc_state *crtc_state,
		   u8 dp_train_pat)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 *DP = &intel_dp->DP;

	*DP &= ~DP_LINK_TRAIN_MASK;

	switch (intel_dp_training_pattern_symbol(dp_train_pat)) {
	case DP_TRAINING_PATTERN_DISABLE:
		*DP |= DP_LINK_TRAIN_OFF;
		break;
	case DP_TRAINING_PATTERN_1:
		*DP |= DP_LINK_TRAIN_PAT_1;
		break;
	case DP_TRAINING_PATTERN_2:
		*DP |= DP_LINK_TRAIN_PAT_2;
		break;
	case DP_TRAINING_PATTERN_3:
		drm_dbg_kms(&dev_priv->drm,
			    "TPS3 not supported, using TPS2 instead\n");
		*DP |= DP_LINK_TRAIN_PAT_2;
		break;
	}

	intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
static void intel_dp_enable_port(struct intel_dp *intel_dp,
				 const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/* enable with pattern 1 (as per spec) */

	intel_dp_program_link_training_pattern(intel_dp, crtc_state,
					       DP_TRAINING_PATTERN_1);

	/*
	 * Magic for VLV/CHV. We _must_ first set up the register
	 * without actually enabling the port, and then do another
	 * write to enable the port. Otherwise link training will
	 * fail when the power sequencer is freshly used for this port.
	 */
	intel_dp->DP |= DP_PORT_EN;
	if (crtc_state->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
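/*
 * Program a DP->HDMI protocol converter's HDMI output mode and YCbCr
 * conversion controls via the branch device DPCD (DP v1.3+).
 */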
void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 tmp;

	if (intel_dp->dpcd[DP_DPCD_REV] < 0x13)
		return;

	if (!drm_dp_is_branch(intel_dp->dpcd))
		return;

	tmp = intel_dp->has_hdmi_sink ?
		DP_HDMI_DVI_OUTPUT_CONFIG : 0;

	if (drm_dp_dpcd_writeb(&intel_dp->aux,
			       DP_PROTOCOL_CONVERTER_CONTROL_0, tmp) != 1)
		drm_dbg_kms(&i915->drm, "Failed to set protocol converter HDMI mode to %s\n",
			    enableddisabled(intel_dp->has_hdmi_sink));

	tmp = intel_dp->dfp.ycbcr_444_to_420 ?
		DP_CONVERSION_TO_YCBCR420_ENABLE : 0;

	if (drm_dp_dpcd_writeb(&intel_dp->aux,
			       DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1)
		drm_dbg_kms(&i915->drm,
			    "Failed to set protocol converter YCbCr 4:2:0 conversion mode to %s\n",
			    enableddisabled(intel_dp->dfp.ycbcr_444_to_420));

	tmp = 0;

	if (drm_dp_dpcd_writeb(&intel_dp->aux,
			       DP_PROTOCOL_CONVERTER_CONTROL_2, tmp) <= 0)
		drm_dbg_kms(&i915->drm,
			    "Failed to set protocol converter YCbCr 4:2:2 conversion mode to %s\n",
			    enableddisabled(false));
}
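/*
 * Note on the error checks above: drm_dp_dpcd_writeb() returns the number
 * of bytes transferred (or a negative error code), so anything other than
 * 1 means the single-byte DPCD write did not complete. The failures are
 * only logged because the protocol converter's power-on defaults remain
 * usable even when reprogramming it fails.
 */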
static void intel_enable_dp(struct intel_atomic_state *state,
			    struct intel_encoder *encoder,
			    const struct intel_crtc_state *pipe_config,
			    const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	u32 dp_reg = intel_de_read(dev_priv, intel_dp->output_reg);
	enum pipe pipe = crtc->pipe;
	intel_wakeref_t wakeref;

	if (drm_WARN_ON(&dev_priv->drm, dp_reg & DP_PORT_EN))
		return;

	with_pps_lock(intel_dp, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			vlv_init_panel_power_sequencer(encoder, pipe_config);

		intel_dp_enable_port(intel_dp, pipe_config);

		edp_panel_vdd_on(intel_dp);
		edp_panel_on(intel_dp);
		edp_panel_vdd_off(intel_dp, true);
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		unsigned int lane_mask = 0x0;

		if (IS_CHERRYVIEW(dev_priv))
			lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);

		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
				    lane_mask);
	}

	intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
	intel_dp_configure_protocol_converter(intel_dp);
	intel_dp_start_link_train(intel_dp, pipe_config);
	intel_dp_stop_link_train(intel_dp, pipe_config);

	if (pipe_config->has_audio) {
		drm_dbg(&dev_priv->drm, "Enabling DP audio on pipe %c\n",
			pipe_name(pipe));
		intel_audio_codec_enable(encoder, pipe_config, conn_state);
	}
}
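/*
 * A note on the locking above: with_pps_lock() is a driver-local helper,
 * defined earlier in this file, that holds pps_mutex together with a
 * display power wakeref for the duration of the block, so the panel power
 * sequencer state cannot change underneath the port enable and panel
 * power-up sequence.
 */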
static void g4x_enable_dp(struct intel_atomic_state *state,
			  struct intel_encoder *encoder,
			  const struct intel_crtc_state *pipe_config,
			  const struct drm_connector_state *conn_state)
{
	intel_enable_dp(state, encoder, pipe_config, conn_state);
	intel_edp_backlight_on(pipe_config, conn_state);
}

static void vlv_enable_dp(struct intel_atomic_state *state,
			  struct intel_encoder *encoder,
			  const struct intel_crtc_state *pipe_config,
			  const struct drm_connector_state *conn_state)
{
	intel_edp_backlight_on(pipe_config, conn_state);
}

static void g4x_pre_enable_dp(struct intel_atomic_state *state,
			      struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	enum port port = encoder->port;

	intel_dp_prepare(encoder, pipe_config);

	/* Only ilk+ has port A */
	if (port == PORT_A)
		ilk_edp_pll_on(intel_dp, pipe_config);
}
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum pipe pipe = intel_dp->pps_pipe;
	i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);

	drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE);

	if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
		return;

	edp_panel_vdd_off_sync(intel_dp);

	/*
	 * VLV seems to get confused when multiple power sequencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power sequencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	drm_dbg_kms(&dev_priv->drm,
		    "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
		    pipe_name(pipe), dig_port->base.base.base.id,
		    dig_port->base.base.name);
	intel_de_write(dev_priv, pp_on_reg, 0);
	intel_de_posting_read(dev_priv, pp_on_reg);

	intel_dp->pps_pipe = INVALID_PIPE;
}
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	struct intel_encoder *encoder;

	lockdep_assert_held(&dev_priv->pps_mutex);

	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		drm_WARN(&dev_priv->drm, intel_dp->active_pipe == pipe,
			 "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
			 pipe_name(pipe), encoder->base.base.id,
			 encoder->base.name);

		if (intel_dp->pps_pipe != pipe)
			continue;

		drm_dbg_kms(&dev_priv->drm,
			    "stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
			    pipe_name(pipe), encoder->base.base.id,
			    encoder->base.name);

		/* make sure vdd is off before we steal it */
		vlv_detach_power_sequencer(intel_dp);
	}
}
static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
					   const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	lockdep_assert_held(&dev_priv->pps_mutex);

	drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE);

	if (intel_dp->pps_pipe != INVALID_PIPE &&
	    intel_dp->pps_pipe != crtc->pipe) {
		/*
		 * If another power sequencer was being used on this
		 * port previously make sure to turn off vdd there while
		 * we still have control of it.
		 */
		vlv_detach_power_sequencer(intel_dp);
	}

	/*
	 * We may be stealing the power
	 * sequencer from another port.
	 */
	vlv_steal_power_sequencer(dev_priv, crtc->pipe);

	intel_dp->active_pipe = crtc->pipe;

	if (!intel_dp_is_edp(intel_dp))
		return;

	/* now it's all ours */
	intel_dp->pps_pipe = crtc->pipe;

	drm_dbg_kms(&dev_priv->drm,
		    "initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(intel_dp->pps_pipe), encoder->base.base.id,
		    encoder->base.name);

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
}
static void vlv_pre_enable_dp(struct intel_atomic_state *state,
			      struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	vlv_phy_pre_encoder_enable(encoder, pipe_config);

	intel_enable_dp(state, encoder, pipe_config, conn_state);
}

static void vlv_dp_pre_pll_enable(struct intel_atomic_state *state,
				  struct intel_encoder *encoder,
				  const struct intel_crtc_state *pipe_config,
				  const struct drm_connector_state *conn_state)
{
	intel_dp_prepare(encoder, pipe_config);

	vlv_phy_pre_pll_enable(encoder, pipe_config);
}

static void chv_pre_enable_dp(struct intel_atomic_state *state,
			      struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	chv_phy_pre_encoder_enable(encoder, pipe_config);

	intel_enable_dp(state, encoder, pipe_config, conn_state);

	/* Second common lane will stay alive on its own now */
	chv_phy_release_cl2_override(encoder);
}

static void chv_dp_pre_pll_enable(struct intel_atomic_state *state,
				  struct intel_encoder *encoder,
				  const struct intel_crtc_state *pipe_config,
				  const struct drm_connector_state *conn_state)
{
	intel_dp_prepare(encoder, pipe_config);

	chv_phy_pre_pll_enable(encoder, pipe_config);
}

static void chv_dp_post_pll_disable(struct intel_atomic_state *state,
				    struct intel_encoder *encoder,
				    const struct intel_crtc_state *old_crtc_state,
				    const struct drm_connector_state *old_conn_state)
{
	chv_phy_post_pll_disable(encoder, old_crtc_state);
}
static u8 intel_dp_voltage_max_2(struct intel_dp *intel_dp,
				 const struct intel_crtc_state *crtc_state)
{
	return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}

static u8 intel_dp_voltage_max_3(struct intel_dp *intel_dp,
				 const struct intel_crtc_state *crtc_state)
{
	return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
}

static u8 intel_dp_preemph_max_2(struct intel_dp *intel_dp)
{
	return DP_TRAIN_PRE_EMPH_LEVEL_2;
}

static u8 intel_dp_preemph_max_3(struct intel_dp *intel_dp)
{
	return DP_TRAIN_PRE_EMPH_LEVEL_3;
}
static void vlv_set_signal_levels(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	u8 train_set = intel_dp->train_set[0];

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return;
		}
		break;
	default:
		return;
	}

	vlv_set_phy_signal_level(encoder, crtc_state,
				 demph_reg_value, preemph_reg_value,
				 uniqtranscale_reg_value, 0);
}
static void chv_set_signal_levels(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	u32 deemph_reg_value, margin_reg_value;
	bool uniq_trans_scale = false;
	u8 train_set = intel_dp->train_set[0];

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			deemph_reg_value = 128;
			margin_reg_value = 154;
			uniq_trans_scale = true;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return;
		}
		break;
	default:
		return;
	}

	chv_set_phy_signal_level(encoder, crtc_state,
				 deemph_reg_value, margin_reg_value,
				 uniq_trans_scale);
}
static u32 g4x_signal_levels(u8 train_set)
{
	u32 signal_levels = 0;

	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
	default:
		signal_levels |= DP_VOLTAGE_0_4;
		break;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
		signal_levels |= DP_VOLTAGE_0_6;
		break;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
		signal_levels |= DP_VOLTAGE_0_8;
		break;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		signal_levels |= DP_VOLTAGE_1_2;
		break;
	}
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
	default:
		signal_levels |= DP_PRE_EMPHASIS_0;
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		signal_levels |= DP_PRE_EMPHASIS_3_5;
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		signal_levels |= DP_PRE_EMPHASIS_6;
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		signal_levels |= DP_PRE_EMPHASIS_9_5;
		break;
	}
	return signal_levels;
}
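/*
 * A worked example of the mapping above (values follow directly from the
 * case labels, no additional hardware documentation assumed): train_set =
 * DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1 yields
 * DP_VOLTAGE_0_8 | DP_PRE_EMPHASIS_3_5, i.e. 0.8V swing with 3.5dB
 * pre-emphasis programmed into the DP port register.
 */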
static void
g4x_set_signal_levels(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 train_set = intel_dp->train_set[0];
	u32 signal_levels;

	signal_levels = g4x_signal_levels(train_set);

	drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
		    signal_levels);

	intel_dp->DP &= ~(DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK);
	intel_dp->DP |= signal_levels;

	intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
/* SNB CPU eDP voltage swing and pre-emphasis control */
static u32 snb_cpu_edp_signal_levels(u8 train_set)
{
	u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					DP_TRAIN_PRE_EMPHASIS_MASK);

	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	}
}
static void
snb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 train_set = intel_dp->train_set[0];
	u32 signal_levels;

	signal_levels = snb_cpu_edp_signal_levels(train_set);

	drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
		    signal_levels);

	intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	intel_dp->DP |= signal_levels;

	intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
/* IVB CPU eDP voltage swing and pre-emphasis control */
static u32 ivb_cpu_edp_signal_levels(u8 train_set)
{
	u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					DP_TRAIN_PRE_EMPHASIS_MASK);

	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400MV_6DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_600MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;

	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
	}
}
static void
ivb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 train_set = intel_dp->train_set[0];
	u32 signal_levels;

	signal_levels = ivb_cpu_edp_signal_levels(train_set);

	drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
		    signal_levels);

	intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	intel_dp->DP |= signal_levels;

	intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 train_set = intel_dp->train_set[0];

	drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s\n",
		    train_set & DP_TRAIN_VOLTAGE_SWING_MASK,
		    train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : "");
	drm_dbg_kms(&dev_priv->drm, "Using pre-emphasis level %d%s\n",
		    (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
		    DP_TRAIN_PRE_EMPHASIS_SHIFT,
		    train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ?
		    " (max)" : "");

	intel_dp->set_signal_levels(intel_dp, crtc_state);
}
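/*
 * intel_dp->set_signal_levels is assumed to be selected per platform at
 * encoder init time (vlv_set_signal_levels, chv_set_signal_levels,
 * g4x_set_signal_levels, or the SNB/IVB CPU eDP variants above), so this
 * wrapper only adds the debug logging common to all of them before
 * dispatching.
 */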
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
				       const struct intel_crtc_state *crtc_state,
				       u8 dp_train_pat)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if ((intel_dp_training_pattern_symbol(dp_train_pat)) !=
	    DP_TRAINING_PATTERN_DISABLE)
		drm_dbg_kms(&dev_priv->drm,
			    "Using DP training pattern TPS%d\n",
			    intel_dp_training_pattern_symbol(dp_train_pat));

	intel_dp->set_link_train(intel_dp, crtc_state, dp_train_pat);
}
static void
intel_dp_link_down(struct intel_encoder *encoder,
		   const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	enum port port = encoder->port;
	u32 DP = intel_dp->DP;

	if (drm_WARN_ON(&dev_priv->drm,
			(intel_de_read(dev_priv, intel_dp->output_reg) &
			 DP_PORT_EN) == 0))
		return;

	drm_dbg_kms(&dev_priv->drm, "\n");

	if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
	} else {
		DP &= ~DP_LINK_TRAIN_MASK;
		DP |= DP_LINK_TRAIN_PAT_IDLE;
	}
	intel_de_write(dev_priv, intel_dp->output_reg, DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	intel_de_write(dev_priv, intel_dp->output_reg, DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	/*
	 * HW workaround for IBX, we need to move the port
	 * to transcoder A after disabling it to allow the
	 * matching HDMI port to be enabled on transcoder A.
	 */
	if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
		/*
		 * We get CPU/PCH FIFO underruns on the other pipe when
		 * doing the workaround. Sweep them under the rug.
		 */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);

		/* always enable with pattern 1 (as per spec) */
		DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
		DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
			DP_LINK_TRAIN_PAT_1;
		intel_de_write(dev_priv, intel_dp->output_reg, DP);
		intel_de_posting_read(dev_priv, intel_dp->output_reg);

		DP &= ~DP_PORT_EN;
		intel_de_write(dev_priv, intel_dp->output_reg, DP);
		intel_de_posting_read(dev_priv, intel_dp->output_reg);

		intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
	}

	msleep(intel_dp->panel_power_down_delay);

	intel_dp->DP = DP;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		intel_wakeref_t wakeref;

		with_pps_lock(intel_dp, wakeref)
			intel_dp->active_pipe = INVALID_PIPE;
	}
}
bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
{
	u8 dprx = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
			      &dprx) != 1)
		return false;
	return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
}
static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/*
	 * Clear the cached register set to avoid using stale values
	 * for the sinks that do not support DSC.
	 */
	memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));

	/* Clear fec_capable to avoid using stale values */
	intel_dp->fec_capable = 0;

	/* Cache the DSC DPCD if eDP or DP rev >= 1.4 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 ||
	    intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
				     intel_dp->dsc_dpcd,
				     sizeof(intel_dp->dsc_dpcd)) < 0)
			drm_err(&i915->drm,
				"Failed to read DPCD register 0x%x\n",
				DP_DSC_SUPPORT);

		drm_dbg_kms(&i915->drm, "DSC DPCD: %*ph\n",
			    (int)sizeof(intel_dp->dsc_dpcd),
			    intel_dp->dsc_dpcd);

		/* FEC is supported only on DP 1.4 */
		if (!intel_dp_is_edp(intel_dp) &&
		    drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
				      &intel_dp->fec_capable) < 0)
			drm_err(&i915->drm,
				"Failed to read FEC DPCD register\n");

		drm_dbg_kms(&i915->drm, "FEC CAPABILITY: %x\n",
			    intel_dp->fec_capable);
	}
}
static bool
intel_edp_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	/* this function is meant to be called only once */
	drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0);

	if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd) != 0)
		return false;

	drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
			 drm_dp_is_branch(intel_dp->dpcd));

	/*
	 * Read the eDP display control registers.
	 *
	 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
	 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
	 * set, but require eDP 1.4+ detection (e.g. for supported link rates
	 * method). The display control registers should read zero if they're
	 * not supported anyway.
	 */
	if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
			     intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
	    sizeof(intel_dp->edp_dpcd))
		drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
			    (int)sizeof(intel_dp->edp_dpcd),
			    intel_dp->edp_dpcd);

	/*
	 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
	 * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
	 */
	intel_psr_init_dpcd(intel_dp);

	/* Read the eDP 1.4+ supported link rates. */
	if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
				 sink_rates, sizeof(sink_rates));

		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/* Value read multiplied by 200kHz gives the per-lane
			 * link rate in kHz. The source rates are, however,
			 * stored in terms of LS_Clk kHz. The full conversion
			 * back to symbols is
			 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
			 */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	/*
	 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
	 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
	 */
	if (intel_dp->num_sink_rates)
		intel_dp->use_rate_select = true;
	else
		intel_dp_set_sink_rates(intel_dp);

	intel_dp_set_common_rates(intel_dp);

	/* Read the eDP DSC DPCD registers */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		intel_dp_get_dsc_sink_cap(intel_dp);

	return true;
}
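/*
 * A worked example of the link rate conversion above: a sink advertising
 * 8.1 Gbps per lane stores 8100000 kHz / 200 kHz = 40500 in its
 * DP_SUPPORTED_LINK_RATES table, and (40500 * 200) / 10 = 810000 is what
 * gets cached in sink_rates[], i.e. the 810 MHz link symbol clock
 * (8b/10b encoding folds the *200 and /10 factors together).
 */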
static bool
intel_dp_has_sink_count(struct intel_dp *intel_dp)
{
	if (!intel_dp->attached_connector)
		return false;

	return drm_dp_read_sink_count_cap(&intel_dp->attached_connector->base,
					  intel_dp->dpcd,
					  &intel_dp->desc);
}
*intel_dp
)
4891 intel_dp_lttpr_init(intel_dp
);
4893 if (drm_dp_read_dpcd_caps(&intel_dp
->aux
, intel_dp
->dpcd
))
4897 * Don't clobber cached eDP rates. Also skip re-reading
4898 * the OUI/ID since we know it won't change.
4900 if (!intel_dp_is_edp(intel_dp
)) {
4901 drm_dp_read_desc(&intel_dp
->aux
, &intel_dp
->desc
,
4902 drm_dp_is_branch(intel_dp
->dpcd
));
4904 intel_dp_set_sink_rates(intel_dp
);
4905 intel_dp_set_common_rates(intel_dp
);
4908 if (intel_dp_has_sink_count(intel_dp
)) {
4909 ret
= drm_dp_read_sink_count(&intel_dp
->aux
);
4914 * Sink count can change between short pulse hpd hence
4915 * a member variable in intel_dp will track any changes
4916 * between short pulse interrupts.
4918 intel_dp
->sink_count
= ret
;
4921 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
4922 * a dongle is present but no display. Unless we require to know
4923 * if a dongle is present or not, we don't need to update
4924 * downstream port information. So, an early return here saves
4925 * time from performing other operations which are not required.
4927 if (!intel_dp
->sink_count
)
4931 return drm_dp_read_downstream_info(&intel_dp
->aux
, intel_dp
->dpcd
,
4932 intel_dp
->downstream_ports
) == 0;
static bool
intel_dp_can_mst(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	return i915->params.enable_dp_mst &&
		intel_dp->can_mst &&
		drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
}

static void
intel_dp_configure_mst(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_encoder *encoder =
		&dp_to_dig_port(intel_dp)->base;
	bool sink_can_mst = drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);

	drm_dbg_kms(&i915->drm,
		    "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
		    encoder->base.base.id, encoder->base.name,
		    yesno(intel_dp->can_mst), yesno(sink_can_mst),
		    yesno(i915->params.enable_dp_mst));

	if (!intel_dp->can_mst)
		return;

	intel_dp->is_mst = sink_can_mst &&
		i915->params.enable_dp_mst;

	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
					intel_dp->is_mst);
}
*intel_dp
, u8
*sink_irq_vector
)
4972 return drm_dp_dpcd_read(&intel_dp
->aux
, DP_SINK_COUNT_ESI
,
4973 sink_irq_vector
, DP_DPRX_ESI_LEN
) ==
4978 intel_dp_needs_vsc_sdp(const struct intel_crtc_state
*crtc_state
,
4979 const struct drm_connector_state
*conn_state
)
4982 * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication
4983 * of Color Encoding Format and Content Color Gamut], in order to
4984 * sending YCBCR 420 or HDR BT.2020 signals we should use DP VSC SDP.
4986 if (crtc_state
->output_format
== INTEL_OUTPUT_FORMAT_YCBCR420
)
4989 switch (conn_state
->colorspace
) {
4990 case DRM_MODE_COLORIMETRY_SYCC_601
:
4991 case DRM_MODE_COLORIMETRY_OPYCC_601
:
4992 case DRM_MODE_COLORIMETRY_BT2020_YCC
:
4993 case DRM_MODE_COLORIMETRY_BT2020_RGB
:
4994 case DRM_MODE_COLORIMETRY_BT2020_CYCC
:
static ssize_t intel_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc,
				     struct dp_sdp *sdp, size_t size)
{
	size_t length = sizeof(struct dp_sdp);

	if (size < length)
		return -ENOSPC;

	memset(sdp, 0, size);

	/*
	 * Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119
	 * VSC SDP Header Bytes
	 */
	sdp->sdp_header.HB0 = 0; /* Secondary-Data Packet ID = 0 */
	sdp->sdp_header.HB1 = vsc->sdp_type; /* Secondary-data Packet Type */
	sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */
	sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */

	/*
	 * Only revision 0x5 supports Pixel Encoding/Colorimetry Format as
	 * per DP 1.4a spec.
	 */
	if (vsc->revision != 0x5)
		goto out;

	/* VSC SDP Payload for DB16 through DB18 */
	/* Pixel Encoding and Colorimetry Formats */
	sdp->db[16] = (vsc->pixelformat & 0xf) << 4; /* DB16[7:4] */
	sdp->db[16] |= vsc->colorimetry & 0xf; /* DB16[3:0] */

	switch (vsc->bpc) {
	case 6:
		/* 6bpc: 0x0 */
		break;
	case 8:
		sdp->db[17] = 0x1; /* DB17[3:0] */
		break;
	case 10:
		sdp->db[17] = 0x2;
		break;
	case 12:
		sdp->db[17] = 0x3;
		break;
	case 16:
		sdp->db[17] = 0x4;
		break;
	default:
		MISSING_CASE(vsc->bpc);
		break;
	}
	/* Dynamic Range and Component Bit Depth */
	if (vsc->dynamic_range == DP_DYNAMIC_RANGE_CTA)
		sdp->db[17] |= 0x80; /* DB17[7] */

	/* Content Type */
	sdp->db[18] = vsc->content_type & 0x7;

out:
	return length;
}
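/*
 * A worked example of the revision 0x5 payload packing above, with the
 * input values chosen purely for illustration: for pixelformat 0x0,
 * colorimetry 0x6, 10 bpc and CTA dynamic range, the packed bytes are
 * db[16] = (0x0 << 4) | 0x6 = 0x06 and db[17] = 0x2 | 0x80 = 0x82,
 * following directly from the shifts and masks in this function.
 */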
static ssize_t
intel_dp_hdr_metadata_infoframe_sdp_pack(const struct hdmi_drm_infoframe *drm_infoframe,
					 struct dp_sdp *sdp,
					 size_t size)
{
	size_t length = sizeof(struct dp_sdp);
	const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE;
	unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE];
	ssize_t len;

	if (size < length)
		return -ENOSPC;

	memset(sdp, 0, size);

	len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf));
	if (len < 0) {
		DRM_DEBUG_KMS("buffer size is smaller than hdr metadata infoframe\n");
		return -ENOSPC;
	}

	if (len != infoframe_size) {
		DRM_DEBUG_KMS("wrong static hdr metadata size\n");
		return -ENOSPC;
	}

	/*
	 * Set up the infoframe sdp packet for HDR static metadata.
	 * Prepare VSC Header for SU as per DP 1.4a spec,
	 * Table 2-100 and Table 2-101
	 */

	/* Secondary-Data Packet ID, 00h for non-Audio INFOFRAME */
	sdp->sdp_header.HB0 = 0;
	/*
	 * Packet Type 80h + Non-audio INFOFRAME Type value
	 * HDMI_INFOFRAME_TYPE_DRM: 0x87
	 * - 80h + Non-audio INFOFRAME Type value
	 * - InfoFrame Type: 0x07
	 * [CTA-861-G Table-42 Dynamic Range and Mastering InfoFrame]
	 */
	sdp->sdp_header.HB1 = drm_infoframe->type;
	/*
	 * Least Significant Eight Bits of (Data Byte Count – 1)
	 * infoframe_size - 1
	 */
	sdp->sdp_header.HB2 = 0x1D;
	/* INFOFRAME SDP Version Number */
	sdp->sdp_header.HB3 = (0x13 << 2);
	/* CTA Header Byte 2 (INFOFRAME Version Number) */
	sdp->db[0] = drm_infoframe->version;
	/* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
	sdp->db[1] = drm_infoframe->length;
	/*
	 * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after
	 * HDMI_INFOFRAME_HEADER_SIZE
	 */
	BUILD_BUG_ON(sizeof(sdp->db) < HDMI_DRM_INFOFRAME_SIZE + 2);
	memcpy(&sdp->db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE],
	       HDMI_DRM_INFOFRAME_SIZE);

	/*
	 * Size of DP infoframe sdp packet for HDR static metadata consists of
	 * - DP SDP Header(struct dp_sdp_header): 4 bytes
	 * - Two Data Blocks: 2 bytes
	 *    CTA Header Byte2 (INFOFRAME Version Number)
	 *    CTA Header Byte3 (Length of INFOFRAME)
	 * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes
	 *
	 * Prior to GEN11, the GMP register size is identical to the DP HDR
	 * static metadata infoframe size. GEN11+ has a larger register, and
	 * write_infoframe will pad the rest of the size.
	 */
	return sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE;
}
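/*
 * The returned size works out to 4 (struct dp_sdp_header) + 2 (the two CTA
 * header data blocks) + 26 (HDMI_DRM_INFOFRAME_SIZE) = 32 bytes, while
 * HB2 = 0x1D encodes Data Byte Count - 1 for the 30-byte packed infoframe
 * (4-byte CTA infoframe header + 26 bytes of payload) validated against
 * infoframe_size above.
 */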
static void intel_write_dp_sdp(struct intel_encoder *encoder,
			       const struct intel_crtc_state *crtc_state,
			       unsigned int type)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct dp_sdp sdp = {};
	ssize_t len;

	if ((crtc_state->infoframes.enable &
	     intel_hdmi_infoframe_enable(type)) == 0)
		return;

	switch (type) {
	case DP_SDP_VSC:
		len = intel_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp,
					    sizeof(sdp));
		break;
	case HDMI_PACKET_TYPE_GAMUT_METADATA:
		len = intel_dp_hdr_metadata_infoframe_sdp_pack(&crtc_state->infoframes.drm.drm,
							       &sdp, sizeof(sdp));
		break;
	default:
		MISSING_CASE(type);
		return;
	}

	if (drm_WARN_ON(&dev_priv->drm, len < 0))
		return;

	dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len);
}

void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state,
			    struct drm_dp_vsc_sdp *vsc)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct dp_sdp sdp = {};
	ssize_t len;

	len = intel_dp_vsc_sdp_pack(vsc, &sdp, sizeof(sdp));

	if (drm_WARN_ON(&dev_priv->drm, len < 0))
		return;

	dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC,
				  &sdp, len);
}
void intel_dp_set_infoframes(struct intel_encoder *encoder,
			     bool enable,
			     const struct intel_crtc_state *crtc_state,
			     const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
	u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
			 VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW |
			 VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK;
	u32 val = intel_de_read(dev_priv, reg);

	/* TODO: Add DSC case (DIP_ENABLE_PPS) */
	/* When PSR is enabled, this routine doesn't disable VSC DIP */
	if (intel_psr_enabled(intel_dp))
		val &= ~dip_enable;
	else
		val &= ~(dip_enable | VIDEO_DIP_ENABLE_VSC_HSW);

	if (!enable) {
		intel_de_write(dev_priv, reg, val);
		intel_de_posting_read(dev_priv, reg);
		return;
	}

	intel_de_write(dev_priv, reg, val);
	intel_de_posting_read(dev_priv, reg);

	/* When PSR is enabled, VSC SDP is handled by PSR routine */
	if (!intel_psr_enabled(intel_dp))
		intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC);

	intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA);
}
static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc,
				   const void *buffer, size_t size)
{
	const struct dp_sdp *sdp = buffer;

	if (size < sizeof(struct dp_sdp))
		return -EINVAL;

	memset(vsc, 0, size);

	if (sdp->sdp_header.HB0 != 0)
		return -EINVAL;

	if (sdp->sdp_header.HB1 != DP_SDP_VSC)
		return -EINVAL;

	vsc->sdp_type = sdp->sdp_header.HB1;
	vsc->revision = sdp->sdp_header.HB2;
	vsc->length = sdp->sdp_header.HB3;

	if ((sdp->sdp_header.HB2 == 0x2 && sdp->sdp_header.HB3 == 0x8) ||
	    (sdp->sdp_header.HB2 == 0x4 && sdp->sdp_header.HB3 == 0xe)) {
		/*
		 * - HB2 = 0x2, HB3 = 0x8
		 *   VSC SDP supporting 3D stereo + PSR
		 * - HB2 = 0x4, HB3 = 0xe
		 *   VSC SDP supporting 3D stereo + PSR2 with Y-coordinate of
		 *   first scan line of the SU region (applies to eDP v1.4b
		 *   and higher).
		 */
		return 0;
	} else if (sdp->sdp_header.HB2 == 0x5 && sdp->sdp_header.HB3 == 0x13) {
		/*
		 * - HB2 = 0x5, HB3 = 0x13
		 *   VSC SDP supporting 3D stereo + PSR2 + Pixel Encoding/
		 *   Colorimetry Format.
		 */
		vsc->pixelformat = (sdp->db[16] >> 4) & 0xf;
		vsc->colorimetry = sdp->db[16] & 0xf;
		vsc->dynamic_range = (sdp->db[17] >> 7) & 0x1;

		switch (sdp->db[17] & 0x7) {
		case 0x0:
			vsc->bpc = 6;
			break;
		case 0x1:
			vsc->bpc = 8;
			break;
		case 0x2:
			vsc->bpc = 10;
			break;
		case 0x3:
			vsc->bpc = 12;
			break;
		case 0x4:
			vsc->bpc = 16;
			break;
		default:
			MISSING_CASE(sdp->db[17] & 0x7);
			return -EINVAL;
		}

		vsc->content_type = sdp->db[18] & 0x7;
	} else {
		return -EINVAL;
	}

	return 0;
}
static int
intel_dp_hdr_metadata_infoframe_sdp_unpack(struct hdmi_drm_infoframe *drm_infoframe,
					   const void *buffer, size_t size)
{
	int ret;

	const struct dp_sdp *sdp = buffer;

	if (size < sizeof(struct dp_sdp))
		return -EINVAL;

	if (sdp->sdp_header.HB0 != 0)
		return -EINVAL;

	if (sdp->sdp_header.HB1 != HDMI_INFOFRAME_TYPE_DRM)
		return -EINVAL;

	/*
	 * Least Significant Eight Bits of (Data Byte Count – 1)
	 * 1Dh (i.e., Data Byte Count = 30 bytes).
	 */
	if (sdp->sdp_header.HB2 != 0x1D)
		return -EINVAL;

	/* Most Significant Two Bits of (Data Byte Count – 1), Clear to 00b. */
	if ((sdp->sdp_header.HB3 & 0x3) != 0)
		return -EINVAL;

	/* INFOFRAME SDP Version Number */
	if (((sdp->sdp_header.HB3 >> 2) & 0x3f) != 0x13)
		return -EINVAL;

	/* CTA Header Byte 2 (INFOFRAME Version Number) */
	if (sdp->db[0] != 1)
		return -EINVAL;

	/* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
	if (sdp->db[1] != HDMI_DRM_INFOFRAME_SIZE)
		return -EINVAL;

	ret = hdmi_drm_infoframe_unpack_only(drm_infoframe, &sdp->db[2],
					     HDMI_DRM_INFOFRAME_SIZE);

	return ret;
}
static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder,
				  struct intel_crtc_state *crtc_state,
				  struct drm_dp_vsc_sdp *vsc)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	unsigned int type = DP_SDP_VSC;
	struct dp_sdp sdp = {};
	int ret;

	/* When PSR is enabled, VSC SDP is handled by PSR routine */
	if (intel_psr_enabled(intel_dp))
		return;

	if ((crtc_state->infoframes.enable &
	     intel_hdmi_infoframe_enable(type)) == 0)
		return;

	dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp));

	ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp));

	if (ret)
		drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP VSC SDP\n");
}

static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encoder,
						     struct intel_crtc_state *crtc_state,
						     struct hdmi_drm_infoframe *drm_infoframe)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA;
	struct dp_sdp sdp = {};
	int ret;

	if ((crtc_state->infoframes.enable &
	     intel_hdmi_infoframe_enable(type)) == 0)
		return;

	dig_port->read_infoframe(encoder, crtc_state, type, &sdp,
				 sizeof(sdp));

	ret = intel_dp_hdr_metadata_infoframe_sdp_unpack(drm_infoframe, &sdp,
							 sizeof(sdp));

	if (ret)
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to unpack DP HDR Metadata Infoframe SDP\n");
}
void intel_read_dp_sdp(struct intel_encoder *encoder,
		       struct intel_crtc_state *crtc_state,
		       unsigned int type)
{
	if (encoder->type != INTEL_OUTPUT_DDI)
		return;

	switch (type) {
	case DP_SDP_VSC:
		intel_read_dp_vsc_sdp(encoder, crtc_state,
				      &crtc_state->infoframes.vsc);
		break;
	case HDMI_PACKET_TYPE_GAMUT_METADATA:
		intel_read_dp_hdr_metadata_infoframe_sdp(encoder, crtc_state,
							 &crtc_state->infoframes.drm.drm);
		break;
	default:
		MISSING_CASE(type);
		break;
	}
}
static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int status = 0;
	int test_link_rate;
	u8 test_lane_count, test_link_bw;
	/* (DP CTS 1.2)
	 * 4.3.1.11
	 */
	/* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
				   &test_lane_count);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Lane count read failed\n");
		return DP_TEST_NAK;
	}
	test_lane_count &= DP_MAX_LANE_COUNT_MASK;

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
				   &test_link_bw);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Link Rate read failed\n");
		return DP_TEST_NAK;
	}
	test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);

	/* Validate the requested link rate and lane count */
	if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
					test_lane_count))
		return DP_TEST_NAK;

	intel_dp->compliance.test_lane_count = test_lane_count;
	intel_dp->compliance.test_link_rate = test_link_rate;

	return DP_TEST_ACK;
}
static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 test_pattern;
	u8 test_misc;
	__be16 h_width, v_height;
	int status = 0;

	/* Read the TEST_PATTERN (DP CTS 3.1.5) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
				   &test_pattern);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Test pattern read failed\n");
		return DP_TEST_NAK;
	}
	if (test_pattern != DP_COLOR_RAMP)
		return DP_TEST_NAK;

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
				  &h_width, 2);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "H Width read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
				  &v_height, 2);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "V Height read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
				   &test_misc);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "TEST MISC read failed\n");
		return DP_TEST_NAK;
	}
	if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
		return DP_TEST_NAK;
	if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
		return DP_TEST_NAK;
	switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
	case DP_TEST_BIT_DEPTH_6:
		intel_dp->compliance.test_data.bpc = 6;
		break;
	case DP_TEST_BIT_DEPTH_8:
		intel_dp->compliance.test_data.bpc = 8;
		break;
	default:
		return DP_TEST_NAK;
	}

	intel_dp->compliance.test_data.video_pattern = test_pattern;
	intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
	intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = true;

	return DP_TEST_ACK;
}
static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 test_result = DP_TEST_ACK;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;

	if (intel_connector->detect_edid == NULL ||
	    connector->edid_corrupt ||
	    intel_dp->aux.i2c_defer_count > 6) {
		/* Check EDID read for NACKs, DEFERs and corruption
		 * (DP CTS 1.2 Core r1.1)
		 *    4.2.2.4 : Failed EDID read, I2C_NAK
		 *    4.2.2.5 : Failed EDID read, I2C_DEFER
		 *    4.2.2.6 : EDID corruption detected
		 * Use failsafe mode for all cases
		 */
		if (intel_dp->aux.i2c_nack_count > 0 ||
		    intel_dp->aux.i2c_defer_count > 0)
			drm_dbg_kms(&i915->drm,
				    "EDID read had %d NACKs, %d DEFERs\n",
				    intel_dp->aux.i2c_nack_count,
				    intel_dp->aux.i2c_defer_count);
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
	} else {
		struct edid *block = intel_connector->detect_edid;

		/* We have to write the checksum
		 * of the last block read
		 */
		block += intel_connector->detect_edid->extensions;

		if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
				       block->checksum) <= 0)
			drm_dbg_kms(&i915->drm,
				    "Failed to write EDID checksum\n");

		test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
	}

	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = true;

	return test_result;
}
static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp,
					const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
			to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
	struct drm_dp_phy_test_params *data =
			&intel_dp->compliance.test_data.phytest;
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum pipe pipe = crtc->pipe;
	u32 pattern_val;

	switch (data->phy_pattern) {
	case DP_PHY_TEST_PATTERN_NONE:
		DRM_DEBUG_KMS("Disable Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0);
		break;
	case DP_PHY_TEST_PATTERN_D10_2:
		DRM_DEBUG_KMS("Set D10.2 Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2);
		break;
	case DP_PHY_TEST_PATTERN_ERROR_COUNT:
		DRM_DEBUG_KMS("Set Error Count Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE |
			       DDI_DP_COMP_CTL_SCRAMBLED_0);
		break;
	case DP_PHY_TEST_PATTERN_PRBS7:
		DRM_DEBUG_KMS("Set PRBS7 Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7);
		break;
	case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
		/*
		 * FIXME: Ideally pattern should come from DPCD 0x250. As
		 * current firmware of DPR-100 could not set it, so hardcoding
		 * now for compliance test.
		 */
		DRM_DEBUG_KMS("Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n");
		pattern_val = 0x3e0f83e0;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val);
		pattern_val = 0x0f83e0f8;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 1), pattern_val);
		pattern_val = 0x0000f83e;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 2), pattern_val);
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE |
			       DDI_DP_COMP_CTL_CUSTOM80);
		break;
	case DP_PHY_TEST_PATTERN_CP2520:
		/*
		 * FIXME: Ideally pattern should come from DPCD 0x24A. As
		 * current firmware of DPR-100 could not set it, so hardcoding
		 * now for compliance test.
		 */
		DRM_DEBUG_KMS("Set HBR2 compliance Phy Test Pattern\n");
		pattern_val = 0xFB;
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 |
			       pattern_val);
		break;
	default:
		WARN(1, "Invalid Phy Test Pattern\n");
	}
}
static void
intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	enum pipe pipe = crtc->pipe;
	u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;

	trans_ddi_func_ctl_value = intel_de_read(dev_priv,
						 TRANS_DDI_FUNC_CTL(pipe));
	trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
	dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));

	trans_ddi_func_ctl_value &= ~(TRANS_DDI_FUNC_ENABLE |
				      TGL_TRANS_DDI_PORT_MASK);
	trans_conf_value &= ~PIPECONF_ENABLE;
	dp_tp_ctl_value &= ~DP_TP_CTL_ENABLE;

	intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
	intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
		       trans_ddi_func_ctl_value);
	intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
}

static void
intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp,
				 const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = dig_port->base.port;
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	enum pipe pipe = crtc->pipe;
	u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;

	trans_ddi_func_ctl_value = intel_de_read(dev_priv,
						 TRANS_DDI_FUNC_CTL(pipe));
	trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
	dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));

	trans_ddi_func_ctl_value |= TRANS_DDI_FUNC_ENABLE |
				    TGL_TRANS_DDI_SELECT_PORT(port);
	trans_conf_value |= PIPECONF_ENABLE;
	dp_tp_ctl_value |= DP_TP_CTL_ENABLE;

	intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
	intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
	intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
		       trans_ddi_func_ctl_value);
}
static void intel_dp_process_phy_request(struct intel_dp *intel_dp,
					 const struct intel_crtc_state *crtc_state)
{
	struct drm_dp_phy_test_params *data =
		&intel_dp->compliance.test_data.phytest;
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
					     link_status) < 0) {
		DRM_DEBUG_KMS("failed to get link status\n");
		return;
	}

	/* retrieve vswing & pre-emphasis setting */
	intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX,
				  link_status);

	intel_dp_autotest_phy_ddi_disable(intel_dp, crtc_state);

	intel_dp_set_signal_levels(intel_dp, crtc_state);

	intel_dp_phy_pattern_update(intel_dp, crtc_state);

	intel_dp_autotest_phy_ddi_enable(intel_dp, crtc_state);

	drm_dp_set_phy_test_pattern(&intel_dp->aux, data,
				    link_status[DP_DPCD_REV]);
}

static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
{
	struct drm_dp_phy_test_params *data =
		&intel_dp->compliance.test_data.phytest;

	if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) {
		DRM_DEBUG_KMS("DP Phy Test pattern AUX read failure\n");
		return DP_TEST_NAK;
	}

	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = true;

	return DP_TEST_ACK;
}
static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 response = DP_TEST_NAK;
	u8 request = 0;
	int status;

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm,
			    "Could not read test request from sink\n");
		goto update_status;
	}

	switch (request) {
	case DP_TEST_LINK_TRAINING:
		drm_dbg_kms(&i915->drm, "LINK_TRAINING test requested\n");
		response = intel_dp_autotest_link_training(intel_dp);
		break;
	case DP_TEST_LINK_VIDEO_PATTERN:
		drm_dbg_kms(&i915->drm, "TEST_PATTERN test requested\n");
		response = intel_dp_autotest_video_pattern(intel_dp);
		break;
	case DP_TEST_LINK_EDID_READ:
		drm_dbg_kms(&i915->drm, "EDID test requested\n");
		response = intel_dp_autotest_edid(intel_dp);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		drm_dbg_kms(&i915->drm, "PHY_PATTERN test requested\n");
		response = intel_dp_autotest_phy_pattern(intel_dp);
		break;
	default:
		drm_dbg_kms(&i915->drm, "Invalid test request '%02x'\n",
			    request);
		break;
	}

	if (response & DP_TEST_ACK)
		intel_dp->compliance.test_type = request;

update_status:
	status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
	if (status <= 0)
		drm_dbg_kms(&i915->drm,
			    "Could not write test response to sink\n");
}
/**
 * intel_dp_check_mst_status - service any pending MST interrupts, check link status
 * @intel_dp: Intel DP struct
 *
 * Read any pending MST interrupts, call MST core to handle these and ack the
 * interrupts. Check if the main and AUX link state is ok.
 *
 * Returns:
 * - %true if pending interrupts were serviced (or no interrupts were
 *   pending) w/o detecting an error condition.
 * - %false if an error condition - like AUX failure or a loss of link - is
 *   detected, which needs servicing from the hotplug work.
 */
static bool
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	bool link_ok = true;

	drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0);

	for (;;) {
		u8 esi[DP_DPRX_ESI_LEN] = {};
		bool handled;
		int retry;

		if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) {
			drm_dbg_kms(&i915->drm,
				    "failed to get ESI - device may have failed\n");
			link_ok = false;

			break;
		}

		/* check link status - esi[10] = 0x200c */
		if (intel_dp->active_mst_links > 0 && link_ok &&
		    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
			drm_dbg_kms(&i915->drm,
				    "channel EQ not ok, retraining\n");
			link_ok = false;
		}

		drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi);

		drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
		if (!handled)
			break;

		for (retry = 0; retry < 3; retry++) {
			int wret;

			wret = drm_dp_dpcd_write(&intel_dp->aux,
						 DP_SINK_COUNT_ESI+1,
						 &esi[1], 3);
			if (wret == 3)
				break;
		}
	}

	return link_ok;
}
static bool
intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
{
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (!intel_dp->link_trained)
		return false;

	/*
	 * While PSR source HW is enabled, it will control main-link sending
	 * frames, enabling and disabling it so trying to do a retrain will
	 * fail as the link may or may not be on, or it could mix training
	 * patterns and frame data at the same time causing retrain to fail.
	 * Also when exiting PSR, HW will retrain the link anyways fixing
	 * any link status error.
	 */
	if (intel_psr_enabled(intel_dp))
		return false;

	if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
					     link_status) < 0)
		return false;

	/*
	 * Validate the cached values of intel_dp->link_rate and
	 * intel_dp->lane_count before attempting to retrain.
	 *
	 * FIXME would be nice to use the crtc state here, but since
	 * we need to call this from the short HPD handler that seems
	 * a bit hard.
	 */
	if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
					intel_dp->lane_count))
		return false;

	/* Retrain if Channel EQ or CR not ok */
	return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
}
static bool intel_dp_has_connector(struct intel_dp *intel_dp,
				   const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_encoder *encoder;
	enum pipe pipe;

	if (!conn_state->best_encoder)
		return false;

	/* SST */
	encoder = &dp_to_dig_port(intel_dp)->base;
	if (conn_state->best_encoder == &encoder->base)
		return true;

	/* MST */
	for_each_pipe(i915, pipe) {
		encoder = &intel_dp->mst_encoders[pipe]->base;
		if (conn_state->best_encoder == &encoder->base)
			return true;
	}

	return false;
}
*intel_dp
,
5905 struct drm_modeset_acquire_ctx
*ctx
,
5908 struct drm_i915_private
*i915
= dp_to_i915(intel_dp
);
5909 struct drm_connector_list_iter conn_iter
;
5910 struct intel_connector
*connector
;
5915 if (!intel_dp_needs_link_retrain(intel_dp
))
5918 drm_connector_list_iter_begin(&i915
->drm
, &conn_iter
);
5919 for_each_intel_connector_iter(connector
, &conn_iter
) {
5920 struct drm_connector_state
*conn_state
=
5921 connector
->base
.state
;
5922 struct intel_crtc_state
*crtc_state
;
5923 struct intel_crtc
*crtc
;
5925 if (!intel_dp_has_connector(intel_dp
, conn_state
))
5928 crtc
= to_intel_crtc(conn_state
->crtc
);
5932 ret
= drm_modeset_lock(&crtc
->base
.mutex
, ctx
);
5936 crtc_state
= to_intel_crtc_state(crtc
->base
.state
);
5938 drm_WARN_ON(&i915
->drm
, !intel_crtc_has_dp_encoder(crtc_state
));
5940 if (!crtc_state
->hw
.active
)
5943 if (conn_state
->commit
&&
5944 !try_wait_for_completion(&conn_state
->commit
->hw_done
))
5947 *crtc_mask
|= drm_crtc_mask(&crtc
->base
);
5949 drm_connector_list_iter_end(&conn_iter
);
5951 if (!intel_dp_needs_link_retrain(intel_dp
))
5957 static bool intel_dp_is_connected(struct intel_dp
*intel_dp
)
5959 struct intel_connector
*connector
= intel_dp
->attached_connector
;
5961 return connector
->base
.status
== connector_status_connected
||
int intel_dp_retrain_link(struct intel_encoder *encoder,
			  struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc;
	u32 crtc_mask;
	int ret;

	if (!intel_dp_is_connected(intel_dp))
		return 0;

	ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
			       ctx);
	if (ret)
		return ret;

	ret = intel_dp_prep_link_retrain(intel_dp, ctx, &crtc_mask);
	if (ret)
		return ret;

	if (crtc_mask == 0)
		return 0;

	drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link\n",
		    encoder->base.base.id, encoder->base.name);

	for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Suppress underruns caused by re-training */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
		if (crtc_state->has_pch_encoder)
			intel_set_pch_fifo_underrun_reporting(dev_priv,
							      intel_crtc_pch_transcoder(crtc), false);
	}

	for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* retrain on the MST master transcoder */
		if (INTEL_GEN(dev_priv) >= 12 &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
		    !intel_dp_mst_is_master_trans(crtc_state))
			continue;

		intel_dp_start_link_train(intel_dp, crtc_state);
		intel_dp_stop_link_train(intel_dp, crtc_state);
		break;
	}

	for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Keep underrun reporting disabled until things are stable */
		intel_wait_for_vblank(dev_priv, crtc->pipe);

		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
		if (crtc_state->has_pch_encoder)
			intel_set_pch_fifo_underrun_reporting(dev_priv,
							      intel_crtc_pch_transcoder(crtc), true);
	}

	return 0;
}
static int intel_dp_prep_phy_test(struct intel_dp *intel_dp,
				  struct drm_modeset_acquire_ctx *ctx,
				  u32 *crtc_mask)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	int ret = 0;

	*crtc_mask = 0;

	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state =
			connector->base.state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!intel_dp_has_connector(intel_dp, conn_state))
			continue;

		crtc = to_intel_crtc(conn_state->crtc);
		if (!crtc)
			continue;

		ret = drm_modeset_lock(&crtc->base.mutex, ctx);
		if (ret)
			break;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));

		if (!crtc_state->hw.active)
			continue;

		if (conn_state->commit &&
		    !try_wait_for_completion(&conn_state->commit->hw_done))
			continue;

		*crtc_mask |= drm_crtc_mask(&crtc->base);
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}
static int intel_dp_do_phy_test(struct intel_encoder *encoder,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc;
	u32 crtc_mask;
	int ret;

	ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
			       ctx);
	if (ret)
		return ret;

	ret = intel_dp_prep_phy_test(intel_dp, ctx, &crtc_mask);
	if (ret)
		return ret;

	if (crtc_mask == 0)
		return 0;

	drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] PHY test\n",
		    encoder->base.base.id, encoder->base.name);

	for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* test on the MST master transcoder */
		if (INTEL_GEN(dev_priv) >= 12 &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
		    !intel_dp_mst_is_master_trans(crtc_state))
			continue;

		intel_dp_process_phy_request(intel_dp, crtc_state);
		break;
	}

	return 0;
}
static void intel_dp_phy_test(struct intel_encoder *encoder)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);

	for (;;) {
		ret = intel_dp_do_phy_test(encoder, &ctx);

		if (ret == -EDEADLK) {
			drm_modeset_backoff(&ctx);
			continue;
		}

		break;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	drm_WARN(encoder->base.dev, ret,
		 "Acquiring modeset locks failed with %i\n", ret);
}
/*
 * If the display is now connected, check the link status; there have been
 * known issues of link loss triggering long pulses.
 *
 * Some sinks (eg. ASUS PB287Q) seem to perform some
 * weird HPD ping pong during modesets. So we can apparently
 * end up with HPD going low during a modeset, and then
 * going back up soon after. And once that happens we must
 * retrain the link to get a picture. That's in case no
 * userspace component reacted to intermittent HPD dip.
 */
static enum intel_hotplug_state
intel_dp_hotplug(struct intel_encoder *encoder,
		 struct intel_connector *connector)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct drm_modeset_acquire_ctx ctx;
	enum intel_hotplug_state state;
	int ret;

	if (intel_dp->compliance.test_active &&
	    intel_dp->compliance.test_type == DP_TEST_LINK_PHY_TEST_PATTERN) {
		intel_dp_phy_test(encoder);
		/* just do the PHY test and nothing else */
		return INTEL_HOTPLUG_UNCHANGED;
	}

	state = intel_encoder_hotplug(encoder, connector);

	drm_modeset_acquire_init(&ctx, 0);

	for (;;) {
		ret = intel_dp_retrain_link(encoder, &ctx);

		if (ret == -EDEADLK) {
			drm_modeset_backoff(&ctx);
			continue;
		}

		break;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	drm_WARN(encoder->base.dev, ret,
		 "Acquiring modeset locks failed with %i\n", ret);

	/*
	 * Keeping it consistent with intel_ddi_hotplug() and
	 * intel_hdmi_hotplug().
	 */
	if (state == INTEL_HOTPLUG_UNCHANGED && !connector->hotplug_retries)
		state = INTEL_HOTPLUG_RETRY;

	return state;
}
static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 val;

	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
		return;

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);

	if (val & DP_AUTOMATED_TEST_REQUEST)
		intel_dp_handle_test_request(intel_dp);

	if (val & DP_CP_IRQ)
		intel_hdcp_handle_cp_irq(intel_dp->attached_connector);

	if (val & DP_SINK_SPECIFIC_IRQ)
		drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n");
}
/*
 * According to DP spec
 * 5.1.2:
 *  1. Read DPCD Revision
 *  2. Configure link according to Receiver Capabilities
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *  4. Check link status on receipt of hot-plug interrupt
 *
 * intel_dp_short_pulse - handles short pulse interrupts
 * when full detection is not required.
 * Returns %true if the short pulse is handled and full detection
 * is NOT required, and %false otherwise.
 */
static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 old_sink_count = intel_dp->sink_count;
	bool ret;

	/*
	 * Clearing compliance test variables to allow capturing
	 * of values for next automated test request.
	 */
	memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));

	/*
	 * Now read the DPCD to see if it's actually running.
	 * If the current value of sink count doesn't match the
	 * value that was stored earlier, or the DPCD read failed,
	 * we need to do full detection.
	 */
	ret = intel_dp_get_dpcd(intel_dp);

	if ((old_sink_count != intel_dp->sink_count) || !ret) {
		/* No need to proceed if we are going to do full detect */
		return false;
	}

	intel_dp_check_service_irq(intel_dp);

	/* Handle CEC interrupts, if any */
	drm_dp_cec_irq(&intel_dp->aux);

	/* defer to the hotplug work for link retraining if needed */
	if (intel_dp_needs_link_retrain(intel_dp))
		return false;

	intel_psr_short_pulse(intel_dp);

	switch (intel_dp->compliance.test_type) {
	case DP_TEST_LINK_TRAINING:
		drm_dbg_kms(&dev_priv->drm,
			    "Link Training Compliance Test requested\n");
		/* Send a Hotplug Uevent to userspace to start modeset */
		drm_kms_helper_hotplug_event(&dev_priv->drm);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		drm_dbg_kms(&dev_priv->drm,
			    "PHY test pattern Compliance Test requested\n");
		/*
		 * Schedule long hpd to do the test
		 *
		 * FIXME get rid of the ad-hoc phy test modeset code
		 * and properly incorporate it into the normal modeset.
		 */
		return false;
	}

	return true;
}
/* XXX this is probably wrong for multiple downstream ports */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u8 *dpcd = intel_dp->dpcd;
	u8 type;

	if (drm_WARN_ON(&i915->drm, intel_dp_is_edp(intel_dp)))
		return connector_status_connected;

	lspcon_resume(dig_port);

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!drm_dp_is_branch(dpcd))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp_has_sink_count(intel_dp) &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
		return intel_dp->sink_count ?
			connector_status_connected : connector_status_disconnected;
	}

	if (intel_dp_can_mst(intel_dp))
		return connector_status_connected;

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	drm_dbg_kms(&i915->drm, "Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}
static enum drm_connector_status
edp_detect(struct intel_dp *intel_dp)
{
	return connector_status_connected;
}
static bool ibx_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit = dev_priv->hotplug.pch_hpd[encoder->hpd_pin];

	return intel_de_read(dev_priv, SDEISR) & bit;
}

static bool g4x_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit;

	switch (encoder->hpd_pin) {
	case HPD_PORT_B:
		bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
		break;
	case HPD_PORT_C:
		bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
		break;
	case HPD_PORT_D:
		bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
		break;
	default:
		MISSING_CASE(encoder->hpd_pin);
		return false;
	}

	return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
}

static bool gm45_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit;

	switch (encoder->hpd_pin) {
	case HPD_PORT_B:
		bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
		break;
	case HPD_PORT_C:
		bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
		break;
	case HPD_PORT_D:
		bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
		break;
	default:
		MISSING_CASE(encoder->hpd_pin);
		return false;
	}

	return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
}

static bool ilk_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit = dev_priv->hotplug.hpd[encoder->hpd_pin];

	return intel_de_read(dev_priv, DEISR) & bit;
}
/**
 * intel_digital_port_connected - is the specified port connected?
 * @encoder: intel_encoder
 *
 * In cases where there's a connector physically connected but it can't be used
 * by our hardware we also return false, since the rest of the driver should
 * pretty much treat the port as disconnected. This is relevant for type-C
 * (starting on ICL) where there's ownership involved.
 *
 * Return %true if port is connected, %false otherwise.
 */
bool intel_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	bool is_connected = false;
	intel_wakeref_t wakeref;

	with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		is_connected = dig_port->connected(encoder);

	return is_connected;
}
static struct edid *
intel_dp_get_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;

	/* use cached edid if we have one */
	if (intel_connector->edid) {
		/* invalid edid */
		if (IS_ERR(intel_connector->edid))
			return NULL;

		return drm_edid_duplicate(intel_connector->edid);
	} else
		return drm_get_edid(&intel_connector->base,
				    &intel_dp->aux.ddc);
}
static void
intel_dp_update_dfp(struct intel_dp *intel_dp,
		    const struct edid *edid)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;

	intel_dp->dfp.max_bpc =
		drm_dp_downstream_max_bpc(intel_dp->dpcd,
					  intel_dp->downstream_ports, edid);

	intel_dp->dfp.max_dotclock =
		drm_dp_downstream_max_dotclock(intel_dp->dpcd,
					       intel_dp->downstream_ports);

	intel_dp->dfp.min_tmds_clock =
		drm_dp_downstream_min_tmds_clock(intel_dp->dpcd,
						 intel_dp->downstream_ports,
						 edid);
	intel_dp->dfp.max_tmds_clock =
		drm_dp_downstream_max_tmds_clock(intel_dp->dpcd,
						 intel_dp->downstream_ports,
						 edid);

	drm_dbg_kms(&i915->drm,
		    "[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d\n",
		    connector->base.base.id, connector->base.name,
		    intel_dp->dfp.max_bpc,
		    intel_dp->dfp.max_dotclock,
		    intel_dp->dfp.min_tmds_clock,
		    intel_dp->dfp.max_tmds_clock);
}
static void
intel_dp_update_420(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	bool is_branch, ycbcr_420_passthrough, ycbcr_444_to_420;

	/* No YCbCr output support on gmch platforms */
	if (HAS_GMCH(i915))
		return;

	/*
	 * ILK doesn't seem capable of DP YCbCr output. The
	 * displayed image is severely corrupted. SNB+ is fine.
	 */
	if (IS_GEN(i915, 5))
		return;

	is_branch = drm_dp_is_branch(intel_dp->dpcd);
	ycbcr_420_passthrough =
		drm_dp_downstream_420_passthrough(intel_dp->dpcd,
						  intel_dp->downstream_ports);
	/* on-board LSPCON always assumed to support 4:4:4->4:2:0 conversion */
	ycbcr_444_to_420 =
		dp_to_dig_port(intel_dp)->lspcon.active ||
		drm_dp_downstream_444_to_420_conversion(intel_dp->dpcd,
							intel_dp->downstream_ports);

	if (INTEL_GEN(i915) >= 11) {
		/* Prefer 4:2:0 passthrough over 4:4:4->4:2:0 conversion */
		intel_dp->dfp.ycbcr_444_to_420 =
			ycbcr_444_to_420 && !ycbcr_420_passthrough;

		connector->base.ycbcr_420_allowed =
			!is_branch || ycbcr_444_to_420 || ycbcr_420_passthrough;
	} else {
		/* 4:4:4->4:2:0 conversion is the only way */
		intel_dp->dfp.ycbcr_444_to_420 = ycbcr_444_to_420;

		connector->base.ycbcr_420_allowed = ycbcr_444_to_420;
	}

	drm_dbg_kms(&i915->drm,
		    "[CONNECTOR:%d:%s] YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n",
		    connector->base.base.id, connector->base.name,
		    yesno(connector->base.ycbcr_420_allowed),
		    yesno(intel_dp->dfp.ycbcr_444_to_420));
}
static void
intel_dp_set_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;
	struct edid *edid;

	intel_dp_unset_edid(intel_dp);
	edid = intel_dp_get_edid(intel_dp);
	connector->detect_edid = edid;

	intel_dp_update_dfp(intel_dp, edid);
	intel_dp_update_420(intel_dp);

	if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
		intel_dp->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
		intel_dp->has_audio = drm_detect_monitor_audio(edid);
	}

	drm_dp_cec_set_edid(&intel_dp->aux, edid);
	intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
}
static void
intel_dp_unset_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;

	drm_dp_cec_unset_edid(&intel_dp->aux);
	kfree(connector->detect_edid);
	connector->detect_edid = NULL;

	intel_dp->has_hdmi_sink = false;
	intel_dp->has_audio = false;
	intel_dp->edid_quirks = 0;

	intel_dp->dfp.max_bpc = 0;
	intel_dp->dfp.max_dotclock = 0;
	intel_dp->dfp.min_tmds_clock = 0;
	intel_dp->dfp.max_tmds_clock = 0;

	intel_dp->dfp.ycbcr_444_to_420 = false;
	connector->base.ycbcr_420_allowed = false;
}
static int
intel_dp_detect(struct drm_connector *connector,
		struct drm_modeset_acquire_ctx *ctx,
		bool force)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	enum drm_connector_status status;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.id, connector->name);
	drm_WARN_ON(&dev_priv->drm,
		    !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));

	if (!INTEL_DISPLAY_ENABLED(dev_priv))
		return connector_status_disconnected;

	/* Can't disconnect eDP */
	if (intel_dp_is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (intel_digital_port_connected(encoder))
		status = intel_dp_detect_dpcd(intel_dp);
	else
		status = connector_status_disconnected;

	if (status == connector_status_disconnected) {
		memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
		memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));

		if (intel_dp->is_mst) {
			drm_dbg_kms(&dev_priv->drm,
				    "MST device may have disappeared %d vs %d\n",
				    intel_dp->is_mst,
				    intel_dp->mst_mgr.mst_state);
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);
		}

		goto out;
	}

	/* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
	if (INTEL_GEN(dev_priv) >= 11)
		intel_dp_get_dsc_sink_cap(intel_dp);

	intel_dp_configure_mst(intel_dp);

	/*
	 * TODO: Reset link params when switching to MST mode, until MST
	 * supports link training fallback params.
	 */
	if (intel_dp->reset_link_params || intel_dp->is_mst) {
		/* Initial max link lane count */
		intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);

		/* Initial max link rate */
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);

		intel_dp->reset_link_params = false;
	}

	intel_dp_print_rates(intel_dp);

	if (intel_dp->is_mst) {
		/*
		 * If we are in MST mode then this connector
		 * won't appear connected or have anything
		 * with EDID on it
		 */
		status = connector_status_disconnected;
		goto out;
	}

	/*
	 * Some external monitors do not signal loss of link synchronization
	 * with an IRQ_HPD, so force a link status check.
	 */
	if (!intel_dp_is_edp(intel_dp)) {
		int ret;

		ret = intel_dp_retrain_link(encoder, ctx);
		if (ret)
			return ret;
	}

	/*
	 * Clearing NACK and defer counts to get their exact values
	 * while reading EDID which are required by Compliance tests
	 * 4.2.2.4 and 4.2.2.5
	 */
	intel_dp->aux.i2c_nack_count = 0;
	intel_dp->aux.i2c_defer_count = 0;

	intel_dp_set_edid(intel_dp);
	if (intel_dp_is_edp(intel_dp) ||
	    to_intel_connector(connector)->detect_edid)
		status = connector_status_connected;

	intel_dp_check_service_irq(intel_dp);

out:
	if (status != connector_status_connected && !intel_dp->is_mst)
		intel_dp_unset_edid(intel_dp);

	/*
	 * Make sure the refs for power wells enabled during detect are
	 * dropped to avoid a new detect cycle triggered by HPD polling.
	 */
	intel_display_power_flush_work(dev_priv);

	if (!intel_dp_is_edp(intel_dp))
		drm_dp_set_subconnector_property(connector,
						 status,
						 intel_dp->dpcd,
						 intel_dp->downstream_ports);
	return status;
}
static void
intel_dp_force(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
	enum intel_display_power_domain aux_domain =
		intel_aux_power_domain(dig_port);
	intel_wakeref_t wakeref;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	if (connector->status != connector_status_connected)
		return;

	wakeref = intel_display_power_get(dev_priv, aux_domain);

	intel_dp_set_edid(intel_dp);

	intel_display_power_put(dev_priv, aux_domain, wakeref);
}
static int intel_dp_get_modes(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct edid *edid;

	edid = intel_connector->detect_edid;
	if (edid) {
		int ret = intel_connector_update_modes(connector, edid);
		if (ret)
			return ret;
	}

	/* if eDP has no EDID, fall back to fixed mode */
	if (intel_dp_is_edp(intel_attached_dp(intel_connector)) &&
	    intel_connector->panel.fixed_mode) {
		struct drm_display_mode *mode;

		mode = drm_mode_duplicate(connector->dev,
					  intel_connector->panel.fixed_mode);
		if (mode) {
			drm_mode_probed_add(connector, mode);
			return 1;
		}
	}

	if (!edid) {
		struct intel_dp *intel_dp = intel_attached_dp(intel_connector);
		struct drm_display_mode *mode;

		mode = drm_dp_downstream_mode(connector->dev,
					      intel_dp->dpcd,
					      intel_dp->downstream_ports);
		if (mode) {
			drm_mode_probed_add(connector, mode);
			return 1;
		}
	}

	return 0;
}
static int
intel_dp_connector_register(struct drm_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->dev);
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	int ret;

	ret = intel_connector_register(connector);
	if (ret)
		return ret;

	drm_dbg_kms(&i915->drm, "registering %s bus for %s\n",
		    intel_dp->aux.name, connector->kdev->kobj.name);

	intel_dp->aux.dev = connector->kdev;
	ret = drm_dp_aux_register(&intel_dp->aux);
	if (!ret)
		drm_dp_cec_register_connector(&intel_dp->aux, connector);
	return ret;
}
static void
intel_dp_connector_unregister(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));

	drm_dp_cec_unregister_connector(&intel_dp->aux);
	drm_dp_aux_unregister(&intel_dp->aux);
	intel_connector_unregister(connector);
}
void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
	struct intel_dp *intel_dp = &dig_port->dp;

	intel_dp_mst_encoder_cleanup(dig_port);
	if (intel_dp_is_edp(intel_dp)) {
		intel_wakeref_t wakeref;

		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		with_pps_lock(intel_dp, wakeref)
			edp_panel_vdd_off_sync(intel_dp);
	}

	intel_dp_aux_fini(intel_dp);
}
static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	intel_dp_encoder_flush_work(encoder);

	drm_encoder_cleanup(encoder);
	kfree(enc_to_dig_port(to_intel_encoder(encoder)));
}
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	with_pps_lock(intel_dp, wakeref)
		edp_panel_vdd_off_sync(intel_dp);
}
void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_pps_lock(intel_dp, wakeref)
		wait_panel_power_cycle(intel_dp);
}
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	drm_dbg_kms(&dev_priv->drm,
		    "VDD left on by BIOS, adjusting state tracking\n");
	intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port));

	edp_panel_vdd_schedule_off(intel_dp);
}
static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	enum pipe pipe;

	if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
				  encoder->port, &pipe))
		return pipe;

	return INVALID_PIPE;
}
void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder));
	intel_wakeref_t wakeref;

	if (!HAS_DDI(dev_priv))
		intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);

	intel_dp->reset_link_params = true;

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !intel_dp_is_edp(intel_dp))
		return;

	with_pps_lock(intel_dp, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			intel_dp->active_pipe = vlv_active_pipe(intel_dp);

		if (intel_dp_is_edp(intel_dp)) {
			/*
			 * Reinit the power sequencer, in case BIOS did
			 * something nasty with it.
			 */
			intel_dp_pps_init(intel_dp);
			intel_edp_panel_vdd_sanitize(intel_dp);
		}
	}
}
static int intel_modeset_tile_group(struct intel_atomic_state *state,
				    int tile_group_id)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;
	int ret = 0;

	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!connector->has_tile ||
		    connector->tile_group->id != tile_group_id)
			continue;

		conn_state = drm_atomic_get_connector_state(&state->base,
							    connector);
		if (IS_ERR(conn_state)) {
			ret = PTR_ERR(conn_state);
			break;
		}

		crtc = to_intel_crtc(conn_state->crtc);
		if (!crtc)
			continue;

		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
		if (ret)
			break;
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}
static int intel_modeset_affected_transcoders(struct intel_atomic_state *state,
					      u8 transcoders)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;

	if (transcoders == 0)
		return 0;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state;
		int ret;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state->hw.enable)
			continue;

		if (!(transcoders & BIT(crtc_state->cpu_transcoder)))
			continue;

		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
		if (ret)
			return ret;

		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
		if (ret)
			return ret;

		transcoders &= ~BIT(crtc_state->cpu_transcoder);
	}

	drm_WARN_ON(&dev_priv->drm, transcoders != 0);

	return 0;
}
static int intel_modeset_synced_crtcs(struct intel_atomic_state *state,
				      struct drm_connector *connector)
{
	const struct drm_connector_state *old_conn_state =
		drm_atomic_get_old_connector_state(&state->base, connector);
	const struct intel_crtc_state *old_crtc_state;
	struct intel_crtc *crtc;
	u8 transcoders;

	crtc = to_intel_crtc(old_conn_state->crtc);
	if (!crtc)
		return 0;

	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);

	if (!old_crtc_state->hw.active)
		return 0;

	transcoders = old_crtc_state->sync_mode_slaves_mask;
	if (old_crtc_state->master_transcoder != INVALID_TRANSCODER)
		transcoders |= BIT(old_crtc_state->master_transcoder);

	return intel_modeset_affected_transcoders(state,
						  transcoders);
}
static int intel_dp_connector_atomic_check(struct drm_connector *conn,
					   struct drm_atomic_state *_state)
{
	struct drm_i915_private *dev_priv = to_i915(conn->dev);
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	int ret;

	ret = intel_digital_connector_atomic_check(conn, &state->base);
	if (ret)
		return ret;

	/*
	 * We don't enable port sync on BDW due to missing w/as and
	 * due to not having adjusted the modeset sequence appropriately.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		return 0;

	if (!intel_connector_needs_modeset(state, conn))
		return 0;

	if (conn->has_tile) {
		ret = intel_modeset_tile_group(state, conn->tile_group->id);
		if (ret)
			return ret;
	}

	return intel_modeset_synced_crtcs(state, conn);
}
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.atomic_get_property = intel_digital_connector_atomic_get_property,
	.atomic_set_property = intel_digital_connector_atomic_set_property,
	.late_register = intel_dp_connector_register,
	.early_unregister = intel_dp_connector_unregister,
	.destroy = intel_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};

static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.detect_ctx = intel_dp_detect,
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.atomic_check = intel_dp_connector_atomic_check,
};

static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};
static bool intel_edp_have_power(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;
	bool have_power = false;

	with_pps_lock(intel_dp, wakeref) {
		have_power = edp_have_panel_power(intel_dp) &&
			     edp_have_panel_vdd(intel_dp);
	}

	return have_power;
}
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_dp *intel_dp = &dig_port->dp;

	if (dig_port->base.type == INTEL_OUTPUT_EDP &&
	    (long_hpd || !intel_edp_have_power(intel_dp))) {
		/*
		 * vdd off can generate a long/short pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long/short hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		drm_dbg_kms(&i915->drm,
			    "ignoring %s hpd on eDP [ENCODER:%d:%s]\n",
			    long_hpd ? "long" : "short",
			    dig_port->base.base.base.id,
			    dig_port->base.base.name);
		return IRQ_HANDLED;
	}

	drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name,
		    long_hpd ? "long" : "short");

	if (long_hpd) {
		intel_dp->reset_link_params = true;
		return IRQ_NONE;
	}

	if (intel_dp->is_mst) {
		if (!intel_dp_check_mst_status(intel_dp))
			return IRQ_NONE;
	} else if (!intel_dp_short_pulse(intel_dp)) {
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}
/* check the VBT to see whether the eDP is on another port */
bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
{
	/*
	 * eDP is not supported on g4x, so bail out early just
	 * for a bit of extra safety in case the VBT is bonkers.
	 */
	if (INTEL_GEN(dev_priv) < 5)
		return false;

	if (INTEL_GEN(dev_priv) < 9 && port == PORT_A)
		return true;

	return intel_bios_is_port_edp(dev_priv, port);
}
static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	enum port port = dp_to_dig_port(intel_dp)->base.port;

	if (!intel_dp_is_edp(intel_dp))
		drm_connector_attach_dp_subconnector_property(connector);

	if (!IS_G4X(dev_priv) && port != PORT_A)
		intel_attach_force_audio_property(connector);

	intel_attach_broadcast_rgb_property(connector);
	if (HAS_GMCH(dev_priv))
		drm_connector_attach_max_bpc_property(connector, 6, 10);
	else if (INTEL_GEN(dev_priv) >= 5)
		drm_connector_attach_max_bpc_property(connector, 6, 12);

	intel_attach_colorspace_property(connector);

	if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 11)
		drm_object_attach_property(&connector->base,
					   connector->dev->mode_config.hdr_output_metadata_property,
					   0);

	if (intel_dp_is_edp(intel_dp)) {
		u32 allowed_scalers;

		allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
		if (!HAS_GMCH(dev_priv))
			allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);

		drm_connector_attach_scaling_mode_property(connector, allowed_scalers);

		connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;
	}
}
static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
{
	intel_dp->panel_power_off_time = ktime_get_boottime();
	intel_dp->last_power_on = jiffies;
	intel_dp->last_backlight_off = jiffies;
}
static void
intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp_on, pp_off, pp_ctl;
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	pp_ctl = ilk_get_pp_control(intel_dp);

	/* Ensure PPS is unlocked */
	if (!HAS_DDI(dev_priv))
		intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);

	pp_on = intel_de_read(dev_priv, regs.pp_on);
	pp_off = intel_de_read(dev_priv, regs.pp_off);

	/* Pull timing values out of registers */
	seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
	seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
	seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
	seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);

	if (i915_mmio_reg_valid(regs.pp_div)) {
		u32 pp_div;

		pp_div = intel_de_read(dev_priv, regs.pp_div);

		seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
	} else {
		seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
	}
}
static void
intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
{
	DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      state_name,
		      seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
}
static void
intel_pps_verify_state(struct intel_dp *intel_dp)
{
	struct edp_power_seq hw;
	struct edp_power_seq *sw = &intel_dp->pps_delays;

	intel_pps_readout_hw_state(intel_dp, &hw);

	if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
	    hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
		DRM_ERROR("PPS state mismatch\n");
		intel_pps_dump_state("sw", sw);
		intel_pps_dump_state("hw", &hw);
	}
}
static void
intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	intel_pps_readout_hw_state(intel_dp, &cur);

	intel_pps_dump_state("cur", &cur);

	vbt = dev_priv->vbt.edp.pps;

	/* On the Toshiba Satellite P50-C-18C system the VBT T12 delay
	 * of 500ms appears to be too short. Occasionally the panel
	 * just fails to power back on. Increasing the delay to 800ms
	 * seems sufficient to avoid this problem.
	 */
	if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
		vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
		drm_dbg_kms(&dev_priv->drm,
			    "Increasing T12 panel delay as per the quirk to %d\n",
			    vbt.t11_t12);
	}

	/* The T11_T12 delay is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too.
	 */
	vbt.t11_t12 += 100 * 10;

	/* Upper limits from the eDP 1.3 spec. Note that we use the clunky units
	 * of our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too.
	 */
	spec.t11_t12 = (510 + 100) * 10;

	intel_pps_dump_state("vbt", &vbt);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	drm_dbg_kms(&dev_priv->drm,
		    "panel power up delay %d, power down delay %d, power cycle delay %d\n",
		    intel_dp->panel_power_up_delay,
		    intel_dp->panel_power_down_delay,
		    intel_dp->panel_power_cycle_delay);

	drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n",
		    intel_dp->backlight_on_delay,
		    intel_dp->backlight_off_delay);

	/*
	 * We override the HW backlight delays to 1 because we do manual waits
	 * on them. For T8, even BSpec recommends doing it. For T9, if we
	 * don't do this, we'll end up waiting for the backlight off delay
	 * twice: once when we do the manual sleep, and once when we disable
	 * the panel and wait for the PP_STATUS bit to become zero.
	 */
	final->t8 = 1;
	final->t9 = 1;

	/*
	 * HW has only a 100msec granularity for t11_t12 so round it up
	 * accordingly.
	 */
	final->t11_t12 = roundup(final->t11_t12, 100 * 10);
}
static void
intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
					      bool force_disable_vdd)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp_on, pp_off, port_sel = 0;
	int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000;
	struct pps_registers regs;
	enum port port = dp_to_dig_port(intel_dp)->base.port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	intel_pps_get_registers(intel_dp, &regs);

	/*
	 * On some VLV machines the BIOS can leave the VDD
	 * enabled even on power sequencers which aren't
	 * hooked up to any port. This would mess up the
	 * power domain tracking the first time we pick
	 * one of these power sequencers for use since
	 * edp_panel_vdd_on() would notice that the VDD was
	 * already on and therefore wouldn't grab the power
	 * domain reference. Disable VDD first to avoid this.
	 * This also avoids spuriously turning the VDD on as
	 * soon as the new power sequencer gets initialized.
	 */
	if (force_disable_vdd) {
		u32 pp = ilk_get_pp_control(intel_dp);

		drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON,
			 "Panel power already on\n");

		if (pp & EDP_FORCE_VDD)
			drm_dbg_kms(&dev_priv->drm,
				    "VDD already on, disabling first\n");

		pp &= ~EDP_FORCE_VDD;

		intel_de_write(dev_priv, regs.pp_ctrl, pp);
	}

	pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
		REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
	pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
		REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		switch (port) {
		case PORT_A:
			port_sel = PANEL_PORT_SELECT_DPA;
			break;
		case PORT_C:
			port_sel = PANEL_PORT_SELECT_DPC;
			break;
		case PORT_D:
			port_sel = PANEL_PORT_SELECT_DPD;
			break;
		default:
			MISSING_CASE(port);
			break;
		}
	}

	pp_on |= port_sel;

	intel_de_write(dev_priv, regs.pp_on, pp_on);
	intel_de_write(dev_priv, regs.pp_off, pp_off);

	/*
	 * Compute the divisor for the pp clock, simply match the Bspec formula.
	 */
	if (i915_mmio_reg_valid(regs.pp_div)) {
		intel_de_write(dev_priv, regs.pp_div,
			       REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) |
			       REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
	} else {
		u32 pp_ctl;

		pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl);
		pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
		pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
		intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		    intel_de_read(dev_priv, regs.pp_on),
		    intel_de_read(dev_priv, regs.pp_off),
		    i915_mmio_reg_valid(regs.pp_div) ?
		    intel_de_read(dev_priv, regs.pp_div) :
		    (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
}
static void intel_dp_pps_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_initial_power_sequencer_setup(intel_dp);
	} else {
		intel_dp_init_panel_power_sequencer(intel_dp);
		intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
	}
}
/**
 * intel_dp_set_drrs_state - program registers for RR switch to take effect
 * @dev_priv: i915 device
 * @crtc_state: a pointer to the active intel_crtc_state
 * @refresh_rate: RR to be programmed
 *
 * This function gets called when the refresh rate (RR) has to be changed from
 * one frequency to another. Switches can be between the high and low RR
 * supported by the panel, or to any other RR based on media playback (in
 * this case, the RR value needs to be passed from user space).
 *
 * The caller of this function needs to take a lock on dev_priv->drrs.
 */
static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
				    const struct intel_crtc_state *crtc_state,
				    int refresh_rate)
{
	struct intel_dp *intel_dp = dev_priv->drrs.dp;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;

	if (refresh_rate <= 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Refresh rate should be positive non-zero.\n");
		return;
	}

	if (intel_dp == NULL) {
		drm_dbg_kms(&dev_priv->drm, "DRRS not supported.\n");
		return;
	}

	if (!intel_crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "DRRS: intel_crtc not initialized\n");
		return;
	}

	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
		drm_dbg_kms(&dev_priv->drm, "Only Seamless DRRS supported.\n");
		return;
	}

	if (drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode) ==
	    refresh_rate)
		index = DRRS_LOW_RR;

	if (index == dev_priv->drrs.refresh_rate_type) {
		drm_dbg_kms(&dev_priv->drm,
			    "DRRS requested for previously set RR...ignoring\n");
		return;
	}

	if (!crtc_state->hw.active) {
		drm_dbg_kms(&dev_priv->drm,
			    "eDP encoder disabled. CRTC not Active\n");
		return;
	}

	if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
		switch (index) {
		case DRRS_HIGH_RR:
			intel_dp_set_m_n(crtc_state, M1_N1);
			break;
		case DRRS_LOW_RR:
			intel_dp_set_m_n(crtc_state, M2_N2);
			break;
		default:
			drm_err(&dev_priv->drm,
				"Unsupported refresh rate type\n");
		}
	} else if (INTEL_GEN(dev_priv) > 6) {
		i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		if (index > DRRS_HIGH_RR) {
			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val |= PIPECONF_EDP_RR_MODE_SWITCH;
		} else {
			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
		}
		intel_de_write(dev_priv, reg, val);
	}

	dev_priv->drrs.refresh_rate_type = index;

	drm_dbg_kms(&dev_priv->drm, "eDP Refresh Rate set to : %dHz\n",
		    refresh_rate);
}
static void
intel_edp_drrs_enable_locked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	dev_priv->drrs.busy_frontbuffer_bits = 0;
	dev_priv->drrs.dp = intel_dp;
}
/**
 * intel_edp_drrs_enable - init drrs struct if supported
 * @intel_dp: DP struct
 * @crtc_state: A pointer to the active crtc state.
 *
 * Initializes frontbuffer_bits and drrs.dp
 */
void intel_edp_drrs_enable(struct intel_dp *intel_dp,
			   const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!crtc_state->has_drrs)
		return;

	drm_dbg_kms(&dev_priv->drm, "Enabling DRRS\n");

	mutex_lock(&dev_priv->drrs.mutex);

	if (dev_priv->drrs.dp) {
		drm_warn(&dev_priv->drm, "DRRS already enabled\n");
		goto unlock;
	}

	intel_edp_drrs_enable_locked(intel_dp);

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}
static void
intel_edp_drrs_disable_locked(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
		int refresh;

		refresh = drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode);
		intel_dp_set_drrs_state(dev_priv, crtc_state, refresh);
	}

	dev_priv->drrs.dp = NULL;
}
/**
 * intel_edp_drrs_disable - Disable DRRS
 * @intel_dp: DP struct
 * @old_crtc_state: Pointer to old crtc_state.
 */
void intel_edp_drrs_disable(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!old_crtc_state->has_drrs)
		return;

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	intel_edp_drrs_disable_locked(intel_dp, old_crtc_state);
	mutex_unlock(&dev_priv->drrs.mutex);

	cancel_delayed_work_sync(&dev_priv->drrs.work);
}
/**
 * intel_edp_drrs_update - Update DRRS state
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 *
 * This function will update DRRS states, disabling or enabling DRRS when
 * executing fastsets. For full modeset, intel_edp_drrs_disable() and
 * intel_edp_drrs_enable() should be called instead.
 */
void
intel_edp_drrs_update(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT)
		return;

	mutex_lock(&dev_priv->drrs.mutex);

	/* New state matches current one? */
	if (crtc_state->has_drrs == !!dev_priv->drrs.dp)
		goto unlock;

	if (crtc_state->has_drrs)
		intel_edp_drrs_enable_locked(intel_dp);
	else
		intel_edp_drrs_disable_locked(intel_dp, crtc_state);

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}
static void intel_edp_drrs_downclock_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), drrs.work.work);
	struct intel_dp *intel_dp;

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;

	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate, hence we need to
	 * recheck.
	 */

	if (dev_priv->drrs.busy_frontbuffer_bits)
		goto unlock;

	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
		struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;

		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
					drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode));
	}

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}
/**
 * intel_edp_drrs_invalidate - Disable Idleness DRRS
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes starts.
 * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR).
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
			       unsigned int frontbuffer_bits)
{
	struct intel_dp *intel_dp;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;
	if (!intel_dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;

	/* invalidate means busy screen hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
					drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));

	mutex_unlock(&dev_priv->drrs.mutex);
}
/**
 * intel_edp_drrs_flush - Restart Idleness DRRS
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes has
 * completed or a flip on a crtc is completed. So DRRS should be upclocked
 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
 * if no other planes are dirty.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
			  unsigned int frontbuffer_bits)
{
	struct intel_dp *intel_dp;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;
	if (!intel_dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* flush means busy screen hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
					drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));

	/*
	 * flush also means no more activity hence schedule downclock, if all
	 * other fbs are quiescent too
	 */
	if (!dev_priv->drrs.busy_frontbuffer_bits)
		schedule_delayed_work(&dev_priv->drrs.work,
				      msecs_to_jiffies(1000));
	mutex_unlock(&dev_priv->drrs.mutex);
}
/**
 * DOC: Display Refresh Rate Switching (DRRS)
 *
 * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates
 * dynamically, based on the usage scenario. This feature is applicable
 * for internal panels.
 *
 * Indication that the panel supports DRRS is given by the panel EDID, which
 * would list multiple refresh rates for one resolution.
 *
 * DRRS is of 2 types - static and seamless.
 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
 * (may appear as a blink on screen) and is used in dock-undock scenarios.
 * Seamless DRRS involves changing RR without any visual effect to the user
 * and can be used during normal system usage. This is done by programming
 * certain registers.
 *
 * Support for static/seamless DRRS may be indicated in the VBT based on
 * inputs from the panel spec.
 *
 * DRRS saves power by switching to low RR based on usage scenarios.
 *
 * The implementation is based on frontbuffer tracking implementation. When
 * there is a disturbance on the screen triggered by user activity or a periodic
 * system activity, DRRS is disabled (RR is changed to high RR). When there is
 * no movement on screen, after a timeout of 1 second, a switch to low RR is
 * made.
 *
 * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
 * and intel_edp_drrs_flush() are called.
 *
 * DRRS can be further extended to support other internal panels and also
 * the scenario of video playback wherein RR is set based on the rate
 * requested by userspace.
 */
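/*
 * A minimal usage sketch of the frontbuffer hooks described above
 * (illustrative only; frontbuffer_bits comes from the caller's own
 * frontbuffer tracking):
 *
 *	intel_edp_drrs_invalidate(dev_priv, frontbuffer_bits);
 *	... render to the frontbuffer ...
 *	intel_edp_drrs_flush(dev_priv, frontbuffer_bits);
 *
 * invalidate() forces the high refresh rate while the screen is busy;
 * flush() re-arms the 1 second idleness timer that downclocks via
 * intel_edp_drrs_downclock_work().
 */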
/**
 * intel_dp_drrs_init - Init basic DRRS work and mutex.
 * @connector: eDP connector
 * @fixed_mode: preferred mode of panel
 *
 * This function is called only once at driver load to initialize basic
 * DRRS stuff.
 *
 * Returns:
 * Downclock mode if panel supports it, else return NULL.
 * DRRS support is determined by the presence of downclock mode (apart
 * from VBT setting).
 */
static struct drm_display_mode *
intel_dp_drrs_init(struct intel_connector *connector,
		   struct drm_display_mode *fixed_mode)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct drm_display_mode *downclock_mode = NULL;

	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
	mutex_init(&dev_priv->drrs.mutex);

	if (INTEL_GEN(dev_priv) <= 6) {
		drm_dbg_kms(&dev_priv->drm,
			    "DRRS supported for Gen7 and above\n");
		return NULL;
	}

	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
		drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n");
		return NULL;
	}

	downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
	if (!downclock_mode) {
		drm_dbg_kms(&dev_priv->drm,
			    "Downclock mode is not found. DRRS not supported\n");
		return NULL;
	}

	dev_priv->drrs.type = dev_priv->vbt.drrs_type;

	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
	drm_dbg_kms(&dev_priv->drm,
		    "seamless DRRS supported for eDP panel.\n");
	return downclock_mode;
}
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector = &intel_connector->base;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	enum pipe pipe = INVALID_PIPE;
	intel_wakeref_t wakeref;
	struct edid *edid;

	if (!intel_dp_is_edp(intel_dp))
		return true;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work);

	/*
	 * On IBX/CPT we may get here with LVDS already registered. Since the
	 * driver uses the only internal power sequencer available for both
	 * eDP and LVDS, bail out early in this case to prevent interfering
	 * with an already powered-on LVDS power sequencer.
	 */
	if (intel_get_lvds_encoder(dev_priv)) {
		drm_WARN_ON(dev,
			    !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
		drm_info(&dev_priv->drm,
			 "LVDS was detected, not registering eDP\n");

		return false;
	}

	with_pps_lock(intel_dp, wakeref) {
		intel_dp_init_panel_power_timestamps(intel_dp);
		intel_dp_pps_init(intel_dp);
		intel_edp_panel_vdd_sanitize(intel_dp);
	}

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_edp_init_dpcd(intel_dp);

	if (!has_dpcd) {
		/* if this fails, presume the device is a ghost */
		drm_info(&dev_priv->drm,
			 "failed to retrieve link info, disabling eDP\n");
		goto out_vdd_off;
	}

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_connector_update_edid_property(connector, edid);
			intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
		} else {
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	intel_connector->edid = edid;

	fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
	if (fixed_mode)
		downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);

	/* fallback to VBT if available for eDP */
	if (!fixed_mode)
		fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		pipe = vlv_active_pipe(intel_dp);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		drm_dbg_kms(&dev_priv->drm,
			    "using pipe %c for initial backlight setup\n",
			    pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight.power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	if (fixed_mode) {
		drm_connector_set_panel_orientation_with_quirk(connector,
				dev_priv->vbt.orientation,
				fixed_mode->hdisplay, fixed_mode->vdisplay);
	}

	return true;

out_vdd_off:
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	with_pps_lock(intel_dp, wakeref)
		edp_panel_vdd_off_sync(intel_dp);

	return false;
}
static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
{
	struct intel_connector *intel_connector;
	struct drm_connector *connector;

	intel_connector = container_of(work, typeof(*intel_connector),
				       modeset_retry_work);
	connector = &intel_connector->base;
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
		      connector->name);

	/* Grab the locks before changing connector property */
	mutex_lock(&connector->dev->mode_config.mutex);
	/*
	 * Set connector link status to BAD and send a Uevent to notify
	 * userspace to do a modeset.
	 */
	drm_connector_set_link_status_property(connector,
					       DRM_MODE_LINK_STATUS_BAD);
	mutex_unlock(&connector->dev->mode_config.mutex);
	/* Send Hotplug uevent so userspace can reprobe */
	drm_kms_helper_hotplug_event(connector->dev);
}
bool
intel_dp_init_connector(struct intel_digital_port *dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &dig_port->dp;
	struct intel_encoder *intel_encoder = &dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = intel_encoder->port;
	enum phy phy = intel_port_to_phy(dev_priv, port);
	int type;

	/* Initialize the work for modeset in case of link train failure */
	INIT_WORK(&intel_connector->modeset_retry_work,
		  intel_dp_modeset_retry_work_fn);

	if (drm_WARN(dev, dig_port->max_lanes < 1,
		     "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
		     dig_port->max_lanes, intel_encoder->base.base.id,
		     intel_encoder->base.name))
		return false;

	intel_dp_set_source_rates(intel_dp);

	intel_dp->reset_link_params = true;
	intel_dp->pps_pipe = INVALID_PIPE;
	intel_dp->active_pipe = INVALID_PIPE;

	/* Preserve the current hw state. */
	intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

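	/*
	 * intel_dp->DP is the cached copy of the DP port control register;
	 * seeding it from the hardware read above keeps whatever the
	 * BIOS/GOP programmed until the first real modeset.
	 */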
	if (intel_dp_is_port_edp(dev_priv, port)) {
		/*
		 * Currently we don't support eDP on TypeC ports, although in
		 * theory it could work on TypeC legacy ports.
		 */
		drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy));
		type = DRM_MODE_CONNECTOR_eDP;
	} else {
		type = DRM_MODE_CONNECTOR_DisplayPort;
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		intel_dp->active_pipe = vlv_active_pipe(intel_dp);

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) ||
			      IS_CHERRYVIEW(dev_priv)) &&
			intel_dp_is_edp(intel_dp) &&
			port != PORT_B && port != PORT_C))
		return false;

	drm_dbg_kms(&dev_priv->drm,
		    "Adding %s connector on [ENCODER:%d:%s]\n",
		    type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
		    intel_encoder->base.base.id, intel_encoder->base.name);

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	if (!HAS_GMCH(dev_priv))
		connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	intel_connector->polled = DRM_CONNECTOR_POLL_HPD;

	intel_dp_aux_init(intel_dp);

	intel_connector_attach_encoder(intel_connector, intel_encoder);

	if (HAS_DDI(dev_priv))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;

	/* init MST on ports that can support it */
	intel_dp_mst_encoder_init(dig_port,
				  intel_connector->base.base.id);

	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		intel_dp_aux_fini(intel_dp);
		intel_dp_mst_encoder_cleanup(dig_port);
		goto fail;
	}

	intel_dp_add_properties(intel_dp, connector);

	if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
		int ret = intel_dp_init_hdcp(dig_port, intel_connector);

		if (ret)
			drm_dbg_kms(&dev_priv->drm,
				    "HDCP init failed, skipping.\n");
	}

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G45(dev_priv)) {
		u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA);

		intel_de_write(dev_priv, PEG_BAND_GAP_DATA,
			       (temp & ~0xf) | 0xd);
	}

	return true;

fail:
	drm_connector_cleanup(connector);

	return false;
}

bool intel_dp_init(struct drm_i915_private *dev_priv,
		   i915_reg_t output_reg,
		   enum port port)
{
	struct intel_digital_port *dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
	if (!dig_port)
		return false;

	intel_connector = intel_connector_alloc();
	if (!intel_connector)
		goto err_connector_alloc;

	intel_encoder = &dig_port->base;
	encoder = &intel_encoder->base;

	mutex_init(&dig_port->hdcp_mutex);

	if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
			     &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
			     "DP %c", port_name(port)))
		goto err_encoder_init;

	intel_encoder->hotplug = intel_dp_hotplug;
	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	intel_encoder->sync_state = intel_dp_sync_state;
	intel_encoder->initial_fastset_check = intel_dp_initial_fastset_check;
	intel_encoder->update_pipe = intel_panel_update_backlight;
	intel_encoder->suspend = intel_dp_encoder_suspend;
	intel_encoder->shutdown = intel_dp_encoder_shutdown;

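	/*
	 * The enable/disable hooks below are invoked by the core display
	 * code at fixed points in the modeset sequence; each platform
	 * branch picks the variant with the right PLL and port
	 * programming order for that hardware generation.
	 */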
	if (IS_CHERRYVIEW(dev_priv)) {
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
		intel_encoder->pre_enable = chv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->disable = vlv_disable_dp;
		intel_encoder->post_disable = chv_post_disable_dp;
		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->disable = vlv_disable_dp;
		intel_encoder->post_disable = vlv_post_disable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		intel_encoder->disable = g4x_disable_dp;
		intel_encoder->post_disable = g4x_post_disable_dp;
	}

	if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev_priv) && port != PORT_A))
		dig_port->dp.set_link_train = cpt_set_link_train;
	else
		dig_port->dp.set_link_train = g4x_set_link_train;

	if (IS_CHERRYVIEW(dev_priv))
		dig_port->dp.set_signal_levels = chv_set_signal_levels;
	else if (IS_VALLEYVIEW(dev_priv))
		dig_port->dp.set_signal_levels = vlv_set_signal_levels;
	else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
		dig_port->dp.set_signal_levels = ivb_cpu_edp_set_signal_levels;
	else if (IS_GEN(dev_priv, 6) && port == PORT_A)
		dig_port->dp.set_signal_levels = snb_cpu_edp_set_signal_levels;
	else
		dig_port->dp.set_signal_levels = g4x_set_signal_levels;

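	/*
	 * set_link_train and set_signal_levels are consumed by the link
	 * training code: each platform variant writes the requested
	 * training pattern and voltage swing / pre-emphasis levels into
	 * its own register layout.
	 */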
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv) ||
	    (HAS_PCH_SPLIT(dev_priv) && port != PORT_A)) {
		dig_port->dp.preemph_max = intel_dp_preemph_max_3;
		dig_port->dp.voltage_max = intel_dp_voltage_max_3;
	} else {
		dig_port->dp.preemph_max = intel_dp_preemph_max_2;
		dig_port->dp.voltage_max = intel_dp_voltage_max_2;
	}

	dig_port->dp.output_reg = output_reg;
	dig_port->max_lanes = 4;

	intel_encoder->type = INTEL_OUTPUT_DP;
	intel_encoder->power_domain = intel_port_to_power_domain(port);
	if (IS_CHERRYVIEW(dev_priv)) {
		if (port == PORT_D)
			intel_encoder->pipe_mask = BIT(PIPE_C);
		else
			intel_encoder->pipe_mask = BIT(PIPE_A) | BIT(PIPE_B);
	} else {
		intel_encoder->pipe_mask = ~0;
	}
	intel_encoder->cloneable = 0;
	intel_encoder->port = port;
	intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);

	dig_port->hpd_pulse = intel_dp_hpd_pulse;

	if (HAS_GMCH(dev_priv)) {
		if (IS_GM45(dev_priv))
			dig_port->connected = gm45_digital_port_connected;
		else
			dig_port->connected = g4x_digital_port_connected;
	} else {
		if (port == PORT_A)
			dig_port->connected = ilk_digital_port_connected;
		else
			dig_port->connected = ibx_digital_port_connected;
	}

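	/*
	 * ->connected() reports live digital port presence straight from
	 * the hardware's HPD status bits, for probe paths that need the
	 * current physical state rather than a cached hotplug event.
	 */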
	if (port != PORT_A)
		intel_infoframe_init(dig_port);

	dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
	if (!intel_dp_init_connector(dig_port, intel_connector))
		goto err_init_connector;

	return true;

err_init_connector:
	drm_encoder_cleanup(encoder);
err_encoder_init:
	kfree(intel_connector);
err_connector_alloc:
	kfree(dig_port);
	return false;
}

void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_DDI)
			continue;

		intel_dp = enc_to_intel_dp(encoder);

		if (!intel_dp->can_mst)
			continue;

		if (intel_dp->is_mst)
			drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
	}
}

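/*
 * Re-sync each MST topology on system resume; if a topology can no
 * longer be resumed, drop back out of MST so the connector gets
 * reprobed from scratch.
 */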
void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;
		int ret;

		if (encoder->type != INTEL_OUTPUT_DDI)
			continue;

		intel_dp = enc_to_intel_dp(encoder);

		if (!intel_dp->can_mst)
			continue;

		ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr,
						     true);
		if (ret) {
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							false);
		}
	}
}