2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Keith Packard <keithp@keithp.com>
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/notifier.h>
32 #include <linux/reboot.h>
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_crtc.h>
36 #include <drm/drm_crtc_helper.h>
37 #include <drm/drm_edid.h>
38 #include "intel_drv.h"
39 #include <drm/i915_drm.h>
42 #define DP_LINK_CHECK_TIMEOUT (10 * 1000)
49 static const struct dp_link_dpll gen4_dpll
[] = {
51 { .p1
= 2, .p2
= 10, .n
= 2, .m1
= 23, .m2
= 8 } },
53 { .p1
= 1, .p2
= 10, .n
= 1, .m1
= 14, .m2
= 2 } }
56 static const struct dp_link_dpll pch_dpll
[] = {
58 { .p1
= 2, .p2
= 10, .n
= 1, .m1
= 12, .m2
= 9 } },
60 { .p1
= 1, .p2
= 10, .n
= 2, .m1
= 14, .m2
= 8 } }
63 static const struct dp_link_dpll vlv_dpll
[] = {
65 { .p1
= 3, .p2
= 2, .n
= 5, .m1
= 3, .m2
= 81 } },
67 { .p1
= 2, .p2
= 2, .n
= 1, .m1
= 2, .m2
= 27 } }
71 * CHV supports eDP 1.4 that have more link rates.
72 * Below only provides the fixed rate but exclude variable rate.
74 static const struct dp_link_dpll chv_dpll
[] = {
76 * CHV requires to program fractional division for m2.
77 * m2 is stored in fixed point format using formula below
78 * (m2_int << 22) | m2_fraction
80 { DP_LINK_BW_1_62
, /* m2_int = 32, m2_fraction = 1677722 */
81 { .p1
= 4, .p2
= 2, .n
= 1, .m1
= 2, .m2
= 0x819999a } },
82 { DP_LINK_BW_2_7
, /* m2_int = 27, m2_fraction = 0 */
83 { .p1
= 4, .p2
= 1, .n
= 1, .m1
= 2, .m2
= 0x6c00000 } },
84 { DP_LINK_BW_5_4
, /* m2_int = 27, m2_fraction = 0 */
85 { .p1
= 2, .p2
= 1, .n
= 1, .m1
= 2, .m2
= 0x6c00000 } }
/* Skylake supports following rates */
static const int gen9_rates[] = { 162000, 216000, 270000,
				  324000, 432000, 540000 };
static const int chv_rates[] = { 162000, 202500, 210000, 216000,
				 243000, 270000, 324000, 405000,
				 420000, 432000, 540000 };
/* Baseline DP link rates (kHz) when the sink supplies no rate table. */
static const int default_rates[] = { 162000, 270000, 540000 };
96 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
97 * @intel_dp: DP struct
99 * If a CPU or PCH DP output is attached to an eDP panel, this function
100 * will return true, and false otherwise.
102 static bool is_edp(struct intel_dp
*intel_dp
)
104 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
106 return intel_dig_port
->base
.type
== INTEL_OUTPUT_EDP
;
109 static struct drm_device
*intel_dp_to_dev(struct intel_dp
*intel_dp
)
111 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
113 return intel_dig_port
->base
.base
.dev
;
116 static struct intel_dp
*intel_attached_dp(struct drm_connector
*connector
)
118 return enc_to_intel_dp(&intel_attached_encoder(connector
)->base
);
121 static void intel_dp_link_down(struct intel_dp
*intel_dp
);
122 static bool edp_panel_vdd_on(struct intel_dp
*intel_dp
);
123 static void edp_panel_vdd_off(struct intel_dp
*intel_dp
, bool sync
);
124 static void vlv_init_panel_power_sequencer(struct intel_dp
*intel_dp
);
125 static void vlv_steal_power_sequencer(struct drm_device
*dev
,
129 intel_dp_max_link_bw(struct intel_dp
*intel_dp
)
131 int max_link_bw
= intel_dp
->dpcd
[DP_MAX_LINK_RATE
];
133 switch (max_link_bw
) {
134 case DP_LINK_BW_1_62
:
139 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
141 max_link_bw
= DP_LINK_BW_1_62
;
147 static u8
intel_dp_max_lane_count(struct intel_dp
*intel_dp
)
149 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
150 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
151 u8 source_max
, sink_max
;
154 if (HAS_DDI(dev
) && intel_dig_port
->port
== PORT_A
&&
155 (intel_dig_port
->saved_port_bits
& DDI_A_4_LANES
) == 0)
158 sink_max
= drm_dp_max_lane_count(intel_dp
->dpcd
);
160 return min(source_max
, sink_max
);
164 * The units on the numbers in the next two are... bizarre. Examples will
165 * make it clearer; this one parallels an example in the eDP spec.
167 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
169 * 270000 * 1 * 8 / 10 == 216000
171 * The actual data capacity of that configuration is 2.16Gbit/s, so the
172 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
173 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
174 * 119000. At 18bpp that's 2142000 kilobits per second.
176 * Thus the strange-looking division by 10 in intel_dp_link_required, to
177 * get the result in decakilobits instead of kilobits.
/*
 * Required link bandwidth for a mode, in decakilobits/s (see the units
 * discussion above): kilopixels/s * bits/pixel, rounded up to a multiple
 * of 10 and then divided by 10.
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}
/*
 * Usable payload capacity of a link, in decakilobits/s: the raw symbol
 * rate times lane count, scaled by the 8b/10b coding efficiency (8/10).
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}
192 static enum drm_mode_status
193 intel_dp_mode_valid(struct drm_connector
*connector
,
194 struct drm_display_mode
*mode
)
196 struct intel_dp
*intel_dp
= intel_attached_dp(connector
);
197 struct intel_connector
*intel_connector
= to_intel_connector(connector
);
198 struct drm_display_mode
*fixed_mode
= intel_connector
->panel
.fixed_mode
;
199 int target_clock
= mode
->clock
;
200 int max_rate
, mode_rate
, max_lanes
, max_link_clock
;
202 if (is_edp(intel_dp
) && fixed_mode
) {
203 if (mode
->hdisplay
> fixed_mode
->hdisplay
)
206 if (mode
->vdisplay
> fixed_mode
->vdisplay
)
209 target_clock
= fixed_mode
->clock
;
212 max_link_clock
= intel_dp_max_link_rate(intel_dp
);
213 max_lanes
= intel_dp_max_lane_count(intel_dp
);
215 max_rate
= intel_dp_max_data_rate(max_link_clock
, max_lanes
);
216 mode_rate
= intel_dp_link_required(target_clock
, 18);
218 if (mode_rate
> max_rate
)
219 return MODE_CLOCK_HIGH
;
221 if (mode
->clock
< 10000)
222 return MODE_CLOCK_LOW
;
224 if (mode
->flags
& DRM_MODE_FLAG_DBLCLK
)
225 return MODE_H_ILLEGAL
;
/*
 * Pack up to 4 bytes into one 32-bit AUX channel data word, first byte
 * in the most significant position. Extra input bytes are ignored.
 */
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	uint32_t v = 0;
	int i;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}
/*
 * Unpack a 32-bit AUX channel data word into up to 4 bytes, most
 * significant byte first. Inverse of intel_dp_pack_aux().
 */
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}
251 /* hrawclock is 1/4 the FSB frequency */
253 intel_hrawclk(struct drm_device
*dev
)
255 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
258 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
259 if (IS_VALLEYVIEW(dev
))
262 clkcfg
= I915_READ(CLKCFG
);
263 switch (clkcfg
& CLKCFG_FSB_MASK
) {
272 case CLKCFG_FSB_1067
:
274 case CLKCFG_FSB_1333
:
276 /* these two are just a guess; one of them might be right */
277 case CLKCFG_FSB_1600
:
278 case CLKCFG_FSB_1600_ALT
:
286 intel_dp_init_panel_power_sequencer(struct drm_device
*dev
,
287 struct intel_dp
*intel_dp
);
289 intel_dp_init_panel_power_sequencer_registers(struct drm_device
*dev
,
290 struct intel_dp
*intel_dp
);
292 static void pps_lock(struct intel_dp
*intel_dp
)
294 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
295 struct intel_encoder
*encoder
= &intel_dig_port
->base
;
296 struct drm_device
*dev
= encoder
->base
.dev
;
297 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
298 enum intel_display_power_domain power_domain
;
301 * See vlv_power_sequencer_reset() why we need
302 * a power domain reference here.
304 power_domain
= intel_display_port_power_domain(encoder
);
305 intel_display_power_get(dev_priv
, power_domain
);
307 mutex_lock(&dev_priv
->pps_mutex
);
310 static void pps_unlock(struct intel_dp
*intel_dp
)
312 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
313 struct intel_encoder
*encoder
= &intel_dig_port
->base
;
314 struct drm_device
*dev
= encoder
->base
.dev
;
315 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
316 enum intel_display_power_domain power_domain
;
318 mutex_unlock(&dev_priv
->pps_mutex
);
320 power_domain
= intel_display_port_power_domain(encoder
);
321 intel_display_power_put(dev_priv
, power_domain
);
325 vlv_power_sequencer_kick(struct intel_dp
*intel_dp
)
327 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
328 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
329 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
330 enum pipe pipe
= intel_dp
->pps_pipe
;
334 if (WARN(I915_READ(intel_dp
->output_reg
) & DP_PORT_EN
,
335 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
336 pipe_name(pipe
), port_name(intel_dig_port
->port
)))
339 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
340 pipe_name(pipe
), port_name(intel_dig_port
->port
));
342 /* Preserve the BIOS-computed detected bit. This is
343 * supposed to be read-only.
345 DP
= I915_READ(intel_dp
->output_reg
) & DP_DETECTED
;
346 DP
|= DP_VOLTAGE_0_4
| DP_PRE_EMPHASIS_0
;
347 DP
|= DP_PORT_WIDTH(1);
348 DP
|= DP_LINK_TRAIN_PAT_1
;
350 if (IS_CHERRYVIEW(dev
))
351 DP
|= DP_PIPE_SELECT_CHV(pipe
);
352 else if (pipe
== PIPE_B
)
353 DP
|= DP_PIPEB_SELECT
;
355 pll_enabled
= I915_READ(DPLL(pipe
)) & DPLL_VCO_ENABLE
;
358 * The DPLL for the pipe must be enabled for this to work.
359 * So enable temporarily it if it's not already enabled.
362 vlv_force_pll_on(dev
, pipe
, IS_CHERRYVIEW(dev
) ?
363 &chv_dpll
[0].dpll
: &vlv_dpll
[0].dpll
);
366 * Similar magic as in intel_dp_enable_port().
367 * We _must_ do this port enable + disable trick
368 * to make this power seqeuencer lock onto the port.
369 * Otherwise even VDD force bit won't work.
371 I915_WRITE(intel_dp
->output_reg
, DP
);
372 POSTING_READ(intel_dp
->output_reg
);
374 I915_WRITE(intel_dp
->output_reg
, DP
| DP_PORT_EN
);
375 POSTING_READ(intel_dp
->output_reg
);
377 I915_WRITE(intel_dp
->output_reg
, DP
& ~DP_PORT_EN
);
378 POSTING_READ(intel_dp
->output_reg
);
381 vlv_force_pll_off(dev
, pipe
);
385 vlv_power_sequencer_pipe(struct intel_dp
*intel_dp
)
387 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
388 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
389 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
390 struct intel_encoder
*encoder
;
391 unsigned int pipes
= (1 << PIPE_A
) | (1 << PIPE_B
);
394 lockdep_assert_held(&dev_priv
->pps_mutex
);
396 /* We should never land here with regular DP ports */
397 WARN_ON(!is_edp(intel_dp
));
399 if (intel_dp
->pps_pipe
!= INVALID_PIPE
)
400 return intel_dp
->pps_pipe
;
403 * We don't have power sequencer currently.
404 * Pick one that's not used by other ports.
406 list_for_each_entry(encoder
, &dev
->mode_config
.encoder_list
,
408 struct intel_dp
*tmp
;
410 if (encoder
->type
!= INTEL_OUTPUT_EDP
)
413 tmp
= enc_to_intel_dp(&encoder
->base
);
415 if (tmp
->pps_pipe
!= INVALID_PIPE
)
416 pipes
&= ~(1 << tmp
->pps_pipe
);
420 * Didn't find one. This should not happen since there
421 * are two power sequencers and up to two eDP ports.
423 if (WARN_ON(pipes
== 0))
426 pipe
= ffs(pipes
) - 1;
428 vlv_steal_power_sequencer(dev
, pipe
);
429 intel_dp
->pps_pipe
= pipe
;
431 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
432 pipe_name(intel_dp
->pps_pipe
),
433 port_name(intel_dig_port
->port
));
435 /* init power sequencer on this pipe and port */
436 intel_dp_init_panel_power_sequencer(dev
, intel_dp
);
437 intel_dp_init_panel_power_sequencer_registers(dev
, intel_dp
);
440 * Even vdd force doesn't work until we've made
441 * the power sequencer lock in on the port.
443 vlv_power_sequencer_kick(intel_dp
);
445 return intel_dp
->pps_pipe
;
448 typedef bool (*vlv_pipe_check
)(struct drm_i915_private
*dev_priv
,
451 static bool vlv_pipe_has_pp_on(struct drm_i915_private
*dev_priv
,
454 return I915_READ(VLV_PIPE_PP_STATUS(pipe
)) & PP_ON
;
457 static bool vlv_pipe_has_vdd_on(struct drm_i915_private
*dev_priv
,
460 return I915_READ(VLV_PIPE_PP_CONTROL(pipe
)) & EDP_FORCE_VDD
;
463 static bool vlv_pipe_any(struct drm_i915_private
*dev_priv
,
470 vlv_initial_pps_pipe(struct drm_i915_private
*dev_priv
,
472 vlv_pipe_check pipe_check
)
476 for (pipe
= PIPE_A
; pipe
<= PIPE_B
; pipe
++) {
477 u32 port_sel
= I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe
)) &
478 PANEL_PORT_SELECT_MASK
;
480 if (port_sel
!= PANEL_PORT_SELECT_VLV(port
))
483 if (!pipe_check(dev_priv
, pipe
))
493 vlv_initial_power_sequencer_setup(struct intel_dp
*intel_dp
)
495 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
496 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
497 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
498 enum port port
= intel_dig_port
->port
;
500 lockdep_assert_held(&dev_priv
->pps_mutex
);
502 /* try to find a pipe with this port selected */
503 /* first pick one where the panel is on */
504 intel_dp
->pps_pipe
= vlv_initial_pps_pipe(dev_priv
, port
,
506 /* didn't find one? pick one where vdd is on */
507 if (intel_dp
->pps_pipe
== INVALID_PIPE
)
508 intel_dp
->pps_pipe
= vlv_initial_pps_pipe(dev_priv
, port
,
509 vlv_pipe_has_vdd_on
);
510 /* didn't find one? pick one with just the correct port */
511 if (intel_dp
->pps_pipe
== INVALID_PIPE
)
512 intel_dp
->pps_pipe
= vlv_initial_pps_pipe(dev_priv
, port
,
515 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
516 if (intel_dp
->pps_pipe
== INVALID_PIPE
) {
517 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
522 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
523 port_name(port
), pipe_name(intel_dp
->pps_pipe
));
525 intel_dp_init_panel_power_sequencer(dev
, intel_dp
);
526 intel_dp_init_panel_power_sequencer_registers(dev
, intel_dp
);
529 void vlv_power_sequencer_reset(struct drm_i915_private
*dev_priv
)
531 struct drm_device
*dev
= dev_priv
->dev
;
532 struct intel_encoder
*encoder
;
534 if (WARN_ON(!IS_VALLEYVIEW(dev
)))
538 * We can't grab pps_mutex here due to deadlock with power_domain
539 * mutex when power_domain functions are called while holding pps_mutex.
540 * That also means that in order to use pps_pipe the code needs to
541 * hold both a power domain reference and pps_mutex, and the power domain
542 * reference get/put must be done while _not_ holding pps_mutex.
543 * pps_{lock,unlock}() do these steps in the correct order, so one
544 * should use them always.
547 list_for_each_entry(encoder
, &dev
->mode_config
.encoder_list
, base
.head
) {
548 struct intel_dp
*intel_dp
;
550 if (encoder
->type
!= INTEL_OUTPUT_EDP
)
553 intel_dp
= enc_to_intel_dp(&encoder
->base
);
554 intel_dp
->pps_pipe
= INVALID_PIPE
;
558 static u32
_pp_ctrl_reg(struct intel_dp
*intel_dp
)
560 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
562 if (HAS_PCH_SPLIT(dev
))
563 return PCH_PP_CONTROL
;
565 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp
));
568 static u32
_pp_stat_reg(struct intel_dp
*intel_dp
)
570 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
572 if (HAS_PCH_SPLIT(dev
))
573 return PCH_PP_STATUS
;
575 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp
));
578 /* Reboot notifier handler to shutdown panel power to guarantee T12 timing
579 This function only applicable when panel PM state is not to be tracked */
580 static int edp_notify_handler(struct notifier_block
*this, unsigned long code
,
583 struct intel_dp
*intel_dp
= container_of(this, typeof(* intel_dp
),
585 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
586 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
588 u32 pp_ctrl_reg
, pp_div_reg
;
590 if (!is_edp(intel_dp
) || code
!= SYS_RESTART
)
595 if (IS_VALLEYVIEW(dev
)) {
596 enum pipe pipe
= vlv_power_sequencer_pipe(intel_dp
);
598 pp_ctrl_reg
= VLV_PIPE_PP_CONTROL(pipe
);
599 pp_div_reg
= VLV_PIPE_PP_DIVISOR(pipe
);
600 pp_div
= I915_READ(pp_div_reg
);
601 pp_div
&= PP_REFERENCE_DIVIDER_MASK
;
603 /* 0x1F write to PP_DIV_REG sets max cycle delay */
604 I915_WRITE(pp_div_reg
, pp_div
| 0x1F);
605 I915_WRITE(pp_ctrl_reg
, PANEL_UNLOCK_REGS
| PANEL_POWER_OFF
);
606 msleep(intel_dp
->panel_power_cycle_delay
);
609 pps_unlock(intel_dp
);
614 static bool edp_have_panel_power(struct intel_dp
*intel_dp
)
616 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
617 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
619 lockdep_assert_held(&dev_priv
->pps_mutex
);
621 if (IS_VALLEYVIEW(dev
) &&
622 intel_dp
->pps_pipe
== INVALID_PIPE
)
625 return (I915_READ(_pp_stat_reg(intel_dp
)) & PP_ON
) != 0;
628 static bool edp_have_panel_vdd(struct intel_dp
*intel_dp
)
630 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
631 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
633 lockdep_assert_held(&dev_priv
->pps_mutex
);
635 if (IS_VALLEYVIEW(dev
) &&
636 intel_dp
->pps_pipe
== INVALID_PIPE
)
639 return I915_READ(_pp_ctrl_reg(intel_dp
)) & EDP_FORCE_VDD
;
643 intel_dp_check_edp(struct intel_dp
*intel_dp
)
645 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
646 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
648 if (!is_edp(intel_dp
))
651 if (!edp_have_panel_power(intel_dp
) && !edp_have_panel_vdd(intel_dp
)) {
652 WARN(1, "eDP powered off while attempting aux channel communication.\n");
653 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
654 I915_READ(_pp_stat_reg(intel_dp
)),
655 I915_READ(_pp_ctrl_reg(intel_dp
)));
660 intel_dp_aux_wait_done(struct intel_dp
*intel_dp
, bool has_aux_irq
)
662 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
663 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
664 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
665 uint32_t ch_ctl
= intel_dp
->aux_ch_ctl_reg
;
669 #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
671 done
= wait_event_timeout(dev_priv
->gmbus_wait_queue
, C
,
672 msecs_to_jiffies_timeout(10));
674 done
= wait_for_atomic(C
, 10) == 0;
676 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
683 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp
*intel_dp
, int index
)
685 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
686 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
689 * The clock divider is based off the hrawclk, and would like to run at
690 * 2MHz. So, take the hrawclk value and divide by 2 and use that
692 return index
? 0 : intel_hrawclk(dev
) / 2;
695 static uint32_t ilk_get_aux_clock_divider(struct intel_dp
*intel_dp
, int index
)
697 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
698 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
703 if (intel_dig_port
->port
== PORT_A
) {
704 if (IS_GEN6(dev
) || IS_GEN7(dev
))
705 return 200; /* SNB & IVB eDP input clock at 400Mhz */
707 return 225; /* eDP input clock at 450Mhz */
709 return DIV_ROUND_UP(intel_pch_rawclk(dev
), 2);
713 static uint32_t hsw_get_aux_clock_divider(struct intel_dp
*intel_dp
, int index
)
715 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
716 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
717 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
719 if (intel_dig_port
->port
== PORT_A
) {
722 return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv
), 2000);
723 } else if (dev_priv
->pch_id
== INTEL_PCH_LPT_DEVICE_ID_TYPE
) {
724 /* Workaround for non-ULT HSW */
731 return index
? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev
), 2);
/* VLV/CHV: a single fixed AUX clock divider of 100; no further entries. */
static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	return index ? 0 : 100;
}
static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug-in into the existing code.
	 */
	return index ? 0 : 1;
}
750 static uint32_t i9xx_get_aux_send_ctl(struct intel_dp
*intel_dp
,
753 uint32_t aux_clock_divider
)
755 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
756 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
757 uint32_t precharge
, timeout
;
764 if (IS_BROADWELL(dev
) && intel_dp
->aux_ch_ctl_reg
== DPA_AUX_CH_CTL
)
765 timeout
= DP_AUX_CH_CTL_TIME_OUT_600us
;
767 timeout
= DP_AUX_CH_CTL_TIME_OUT_400us
;
769 return DP_AUX_CH_CTL_SEND_BUSY
|
771 (has_aux_irq
? DP_AUX_CH_CTL_INTERRUPT
: 0) |
772 DP_AUX_CH_CTL_TIME_OUT_ERROR
|
774 DP_AUX_CH_CTL_RECEIVE_ERROR
|
775 (send_bytes
<< DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT
) |
776 (precharge
<< DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT
) |
777 (aux_clock_divider
<< DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT
);
780 static uint32_t skl_get_aux_send_ctl(struct intel_dp
*intel_dp
,
785 return DP_AUX_CH_CTL_SEND_BUSY
|
787 (has_aux_irq
? DP_AUX_CH_CTL_INTERRUPT
: 0) |
788 DP_AUX_CH_CTL_TIME_OUT_ERROR
|
789 DP_AUX_CH_CTL_TIME_OUT_1600us
|
790 DP_AUX_CH_CTL_RECEIVE_ERROR
|
791 (send_bytes
<< DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT
) |
792 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
796 intel_dp_aux_ch(struct intel_dp
*intel_dp
,
797 const uint8_t *send
, int send_bytes
,
798 uint8_t *recv
, int recv_size
)
800 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
801 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
802 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
803 uint32_t ch_ctl
= intel_dp
->aux_ch_ctl_reg
;
804 uint32_t ch_data
= ch_ctl
+ 4;
805 uint32_t aux_clock_divider
;
806 int i
, ret
, recv_bytes
;
809 bool has_aux_irq
= HAS_AUX_IRQ(dev
);
815 * We will be called with VDD already enabled for dpcd/edid/oui reads.
816 * In such cases we want to leave VDD enabled and it's up to upper layers
817 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
820 vdd
= edp_panel_vdd_on(intel_dp
);
822 /* dp aux is extremely sensitive to irq latency, hence request the
823 * lowest possible wakeup latency and so prevent the cpu from going into
826 pm_qos_update_request(&dev_priv
->pm_qos
, 0);
828 intel_dp_check_edp(intel_dp
);
830 intel_aux_display_runtime_get(dev_priv
);
832 /* Try to wait for any previous AUX channel activity */
833 for (try = 0; try < 3; try++) {
834 status
= I915_READ_NOTRACE(ch_ctl
);
835 if ((status
& DP_AUX_CH_CTL_SEND_BUSY
) == 0)
841 WARN(1, "dp_aux_ch not started status 0x%08x\n",
847 /* Only 5 data registers! */
848 if (WARN_ON(send_bytes
> 20 || recv_size
> 20)) {
853 while ((aux_clock_divider
= intel_dp
->get_aux_clock_divider(intel_dp
, clock
++))) {
854 u32 send_ctl
= intel_dp
->get_aux_send_ctl(intel_dp
,
859 /* Must try at least 3 times according to DP spec */
860 for (try = 0; try < 5; try++) {
861 /* Load the send data into the aux channel data registers */
862 for (i
= 0; i
< send_bytes
; i
+= 4)
863 I915_WRITE(ch_data
+ i
,
864 intel_dp_pack_aux(send
+ i
,
867 /* Send the command and wait for it to complete */
868 I915_WRITE(ch_ctl
, send_ctl
);
870 status
= intel_dp_aux_wait_done(intel_dp
, has_aux_irq
);
872 /* Clear done status and any errors */
876 DP_AUX_CH_CTL_TIME_OUT_ERROR
|
877 DP_AUX_CH_CTL_RECEIVE_ERROR
);
879 if (status
& (DP_AUX_CH_CTL_TIME_OUT_ERROR
|
880 DP_AUX_CH_CTL_RECEIVE_ERROR
))
882 if (status
& DP_AUX_CH_CTL_DONE
)
887 if ((status
& DP_AUX_CH_CTL_DONE
) == 0) {
888 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status
);
894 /* Check for timeout or receive error.
895 * Timeouts occur when the sink is not connected
897 if (status
& DP_AUX_CH_CTL_RECEIVE_ERROR
) {
898 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status
);
903 /* Timeouts occur when the device isn't connected, so they're
904 * "normal" -- don't fill the kernel log with these */
905 if (status
& DP_AUX_CH_CTL_TIME_OUT_ERROR
) {
906 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status
);
911 /* Unload any bytes sent back from the other side */
912 recv_bytes
= ((status
& DP_AUX_CH_CTL_MESSAGE_SIZE_MASK
) >>
913 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT
);
914 if (recv_bytes
> recv_size
)
915 recv_bytes
= recv_size
;
917 for (i
= 0; i
< recv_bytes
; i
+= 4)
918 intel_dp_unpack_aux(I915_READ(ch_data
+ i
),
919 recv
+ i
, recv_bytes
- i
);
923 pm_qos_update_request(&dev_priv
->pm_qos
, PM_QOS_DEFAULT_VALUE
);
924 intel_aux_display_runtime_put(dev_priv
);
927 edp_panel_vdd_off(intel_dp
, false);
929 pps_unlock(intel_dp
);
934 #define BARE_ADDRESS_SIZE 3
935 #define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
937 intel_dp_aux_transfer(struct drm_dp_aux
*aux
, struct drm_dp_aux_msg
*msg
)
939 struct intel_dp
*intel_dp
= container_of(aux
, struct intel_dp
, aux
);
940 uint8_t txbuf
[20], rxbuf
[20];
941 size_t txsize
, rxsize
;
944 txbuf
[0] = (msg
->request
<< 4) |
945 ((msg
->address
>> 16) & 0xf);
946 txbuf
[1] = (msg
->address
>> 8) & 0xff;
947 txbuf
[2] = msg
->address
& 0xff;
948 txbuf
[3] = msg
->size
- 1;
950 switch (msg
->request
& ~DP_AUX_I2C_MOT
) {
951 case DP_AUX_NATIVE_WRITE
:
952 case DP_AUX_I2C_WRITE
:
953 txsize
= msg
->size
? HEADER_SIZE
+ msg
->size
: BARE_ADDRESS_SIZE
;
954 rxsize
= 2; /* 0 or 1 data bytes */
956 if (WARN_ON(txsize
> 20))
959 memcpy(txbuf
+ HEADER_SIZE
, msg
->buffer
, msg
->size
);
961 ret
= intel_dp_aux_ch(intel_dp
, txbuf
, txsize
, rxbuf
, rxsize
);
963 msg
->reply
= rxbuf
[0] >> 4;
966 /* Number of bytes written in a short write. */
967 ret
= clamp_t(int, rxbuf
[1], 0, msg
->size
);
969 /* Return payload size. */
975 case DP_AUX_NATIVE_READ
:
976 case DP_AUX_I2C_READ
:
977 txsize
= msg
->size
? HEADER_SIZE
: BARE_ADDRESS_SIZE
;
978 rxsize
= msg
->size
+ 1;
980 if (WARN_ON(rxsize
> 20))
983 ret
= intel_dp_aux_ch(intel_dp
, txbuf
, txsize
, rxbuf
, rxsize
);
985 msg
->reply
= rxbuf
[0] >> 4;
987 * Assume happy day, and copy the data. The caller is
988 * expected to check msg->reply before touching it.
990 * Return payload size.
993 memcpy(msg
->buffer
, rxbuf
+ 1, ret
);
1006 intel_dp_aux_init(struct intel_dp
*intel_dp
, struct intel_connector
*connector
)
1008 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
1009 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
1010 enum port port
= intel_dig_port
->port
;
1011 const char *name
= NULL
;
1016 intel_dp
->aux_ch_ctl_reg
= DPA_AUX_CH_CTL
;
1020 intel_dp
->aux_ch_ctl_reg
= PCH_DPB_AUX_CH_CTL
;
1024 intel_dp
->aux_ch_ctl_reg
= PCH_DPC_AUX_CH_CTL
;
1028 intel_dp
->aux_ch_ctl_reg
= PCH_DPD_AUX_CH_CTL
;
1036 * The AUX_CTL register is usually DP_CTL + 0x10.
1038 * On Haswell and Broadwell though:
1039 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
1040 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
1042 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
1044 if (!IS_HASWELL(dev
) && !IS_BROADWELL(dev
))
1045 intel_dp
->aux_ch_ctl_reg
= intel_dp
->output_reg
+ 0x10;
1047 intel_dp
->aux
.name
= name
;
1048 intel_dp
->aux
.dev
= dev
->dev
;
1049 intel_dp
->aux
.transfer
= intel_dp_aux_transfer
;
1051 DRM_DEBUG_KMS("registering %s bus for %s\n", name
,
1052 connector
->base
.kdev
->kobj
.name
);
1054 ret
= drm_dp_aux_register(&intel_dp
->aux
);
1056 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
1061 ret
= sysfs_create_link(&connector
->base
.kdev
->kobj
,
1062 &intel_dp
->aux
.ddc
.dev
.kobj
,
1063 intel_dp
->aux
.ddc
.dev
.kobj
.name
);
1065 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name
, ret
);
1066 drm_dp_aux_unregister(&intel_dp
->aux
);
1071 intel_dp_connector_unregister(struct intel_connector
*intel_connector
)
1073 struct intel_dp
*intel_dp
= intel_attached_dp(&intel_connector
->base
);
1075 if (!intel_connector
->mst_port
)
1076 sysfs_remove_link(&intel_connector
->base
.kdev
->kobj
,
1077 intel_dp
->aux
.ddc
.dev
.kobj
.name
);
1078 intel_connector_unregister(intel_connector
);
1082 skl_edp_set_pll_config(struct intel_crtc_state
*pipe_config
, int link_clock
)
1086 pipe_config
->ddi_pll_sel
= SKL_DPLL0
;
1087 pipe_config
->dpll_hw_state
.cfgcr1
= 0;
1088 pipe_config
->dpll_hw_state
.cfgcr2
= 0;
1090 ctrl1
= DPLL_CTRL1_OVERRIDE(SKL_DPLL0
);
1091 switch (link_clock
/ 2) {
1093 ctrl1
|= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810
,
1097 ctrl1
|= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350
,
1101 ctrl1
|= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700
,
1105 ctrl1
|= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1620
,
1108 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1109 results in CDCLK change. Need to handle the change of CDCLK by
1110 disabling pipes and re-enabling them */
1112 ctrl1
|= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1080
,
1116 ctrl1
|= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2160
,
1121 pipe_config
->dpll_hw_state
.ctrl1
= ctrl1
;
1125 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state
*pipe_config
, int link_bw
)
1128 case DP_LINK_BW_1_62
:
1129 pipe_config
->ddi_pll_sel
= PORT_CLK_SEL_LCPLL_810
;
1131 case DP_LINK_BW_2_7
:
1132 pipe_config
->ddi_pll_sel
= PORT_CLK_SEL_LCPLL_1350
;
1134 case DP_LINK_BW_5_4
:
1135 pipe_config
->ddi_pll_sel
= PORT_CLK_SEL_LCPLL_2700
;
1141 intel_dp_sink_rates(struct intel_dp
*intel_dp
, const int **sink_rates
)
1143 if (intel_dp
->num_sink_rates
) {
1144 *sink_rates
= intel_dp
->sink_rates
;
1145 return intel_dp
->num_sink_rates
;
1148 *sink_rates
= default_rates
;
1150 return (intel_dp_max_link_bw(intel_dp
) >> 3) + 1;
1153 static bool intel_dp_source_supports_hbr2(struct drm_device
*dev
)
1155 /* WaDisableHBR2:skl */
1156 if (IS_SKYLAKE(dev
) && INTEL_REVID(dev
) <= SKL_REVID_B0
)
1159 if ((IS_HASWELL(dev
) && !IS_HSW_ULX(dev
)) || IS_BROADWELL(dev
) ||
1160 (INTEL_INFO(dev
)->gen
>= 9))
1167 intel_dp_source_rates(struct drm_device
*dev
, const int **source_rates
)
1169 if (INTEL_INFO(dev
)->gen
>= 9) {
1170 *source_rates
= gen9_rates
;
1171 return ARRAY_SIZE(gen9_rates
);
1172 } else if (IS_CHERRYVIEW(dev
)) {
1173 *source_rates
= chv_rates
;
1174 return ARRAY_SIZE(chv_rates
);
1177 *source_rates
= default_rates
;
1179 /* This depends on the fact that 5.4 is last value in the array */
1180 if (intel_dp_source_supports_hbr2(dev
))
1181 return (DP_LINK_BW_5_4
>> 3) + 1;
1183 return (DP_LINK_BW_2_7
>> 3) + 1;
1187 intel_dp_set_clock(struct intel_encoder
*encoder
,
1188 struct intel_crtc_state
*pipe_config
, int link_bw
)
1190 struct drm_device
*dev
= encoder
->base
.dev
;
1191 const struct dp_link_dpll
*divisor
= NULL
;
1195 divisor
= gen4_dpll
;
1196 count
= ARRAY_SIZE(gen4_dpll
);
1197 } else if (HAS_PCH_SPLIT(dev
)) {
1199 count
= ARRAY_SIZE(pch_dpll
);
1200 } else if (IS_CHERRYVIEW(dev
)) {
1202 count
= ARRAY_SIZE(chv_dpll
);
1203 } else if (IS_VALLEYVIEW(dev
)) {
1205 count
= ARRAY_SIZE(vlv_dpll
);
1208 if (divisor
&& count
) {
1209 for (i
= 0; i
< count
; i
++) {
1210 if (link_bw
== divisor
[i
].link_bw
) {
1211 pipe_config
->dpll
= divisor
[i
].dpll
;
1212 pipe_config
->clock_set
= true;
1219 static int intersect_rates(const int *source_rates
, int source_len
,
1220 const int *sink_rates
, int sink_len
,
1223 int i
= 0, j
= 0, k
= 0;
1225 while (i
< source_len
&& j
< sink_len
) {
1226 if (source_rates
[i
] == sink_rates
[j
]) {
1227 if (WARN_ON(k
>= DP_MAX_SUPPORTED_RATES
))
1229 common_rates
[k
] = source_rates
[i
];
1233 } else if (source_rates
[i
] < sink_rates
[j
]) {
/*
 * Fill common_rates with the rates supported by both source and sink;
 * returns the count. common_rates must hold DP_MAX_SUPPORTED_RATES ints.
 */
static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *source_rates, *sink_rates;
	int source_len, sink_len;

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	source_len = intel_dp_source_rates(dev, &source_rates);

	return intersect_rates(source_rates, source_len,
			       sink_rates, sink_len,
			       common_rates);
}
/*
 * Format up to nelem integers as a comma-separated list into str
 * (debug-output helper for the rate tables).
 */
1257 static void snprintf_int_array(char *str
, size_t len
,
1258 const int *array
, int nelem
)
1264 for (i
= 0; i
< nelem
; i
++) {
/* snprintf returns the would-be length; overflow handling is elided here. */
1265 int r
= snprintf(str
, len
, "%d,", array
[i
]);
/*
 * Debug-print the source, sink and common link-rate tables.
 * Cheap early-out when KMS debugging is not enabled.
 */
1273 static void intel_dp_print_rates(struct intel_dp
*intel_dp
)
1275 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
1276 const int *source_rates
, *sink_rates
;
1277 int source_len
, sink_len
, common_len
;
1278 int common_rates
[DP_MAX_SUPPORTED_RATES
];
1279 char str
[128]; /* FIXME: too big for stack? */
/* Skip all the formatting work unless DRM KMS debugging is on. */
1281 if ((drm_debug
& DRM_UT_KMS
) == 0)
1284 source_len
= intel_dp_source_rates(dev
, &source_rates
);
1285 snprintf_int_array(str
, sizeof(str
), source_rates
, source_len
);
1286 DRM_DEBUG_KMS("source rates: %s\n", str
);
1288 sink_len
= intel_dp_sink_rates(intel_dp
, &sink_rates
);
1289 snprintf_int_array(str
, sizeof(str
), sink_rates
, sink_len
);
1290 DRM_DEBUG_KMS("sink rates: %s\n", str
);
1292 common_len
= intel_dp_common_rates(intel_dp
, common_rates
);
1293 snprintf_int_array(str
, sizeof(str
), common_rates
, common_len
);
1294 DRM_DEBUG_KMS("common rates: %s\n", str
);
/*
 * Linear scan for 'find' in 'rates' over at most DP_MAX_SUPPORTED_RATES
 * entries. NOTE(review): the return statements are elided in this
 * extraction — confirm the not-found behaviour against the full source.
 */
1297 static int rate_to_index(int find
, const int *rates
)
1301 for (i
= 0; i
< DP_MAX_SUPPORTED_RATES
; ++i
)
1302 if (find
== rates
[i
])
/*
 * Return the highest link rate common to source and sink.
 * rates[] is zero-initialized, so rate_to_index(0, rates) yields the
 * number of populated entries; the last entry is the maximum rate.
 */
1309 intel_dp_max_link_rate(struct intel_dp
*intel_dp
)
1311 int rates
[DP_MAX_SUPPORTED_RATES
] = {};
1314 len
= intel_dp_common_rates(intel_dp
, rates
);
/* No common rate would mean a broken source/sink pairing. */
1315 if (WARN_ON(len
<= 0))
1318 return rates
[rate_to_index(0, rates
) - 1];
1321 int intel_dp_rate_select(struct intel_dp
*intel_dp
, int rate
)
1323 return rate_to_index(rate
, intel_dp
->sink_rates
);
/*
 * Compute the DP link configuration (bpp, lane count, link rate, m/n
 * values) for the requested mode. Walks bpp from the pipe's value down
 * to 18 (6 bpc), and for each bpp searches clock/lane-count combinations
 * until the required mode rate fits in the available link bandwidth.
 * NOTE(review): many original lines are elided in this extraction.
 */
1327 intel_dp_compute_config(struct intel_encoder
*encoder
,
1328 struct intel_crtc_state
*pipe_config
)
1330 struct drm_device
*dev
= encoder
->base
.dev
;
1331 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1332 struct drm_display_mode
*adjusted_mode
= &pipe_config
->base
.adjusted_mode
;
1333 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
1334 enum port port
= dp_to_dig_port(intel_dp
)->port
;
1335 struct intel_crtc
*intel_crtc
= to_intel_crtc(pipe_config
->base
.crtc
);
1336 struct intel_connector
*intel_connector
= intel_dp
->attached_connector
;
1337 int lane_count
, clock
;
1338 int min_lane_count
= 1;
1339 int max_lane_count
= intel_dp_max_lane_count(intel_dp
);
1340 /* Conveniently, the link BW constants become indices with a shift...*/
1344 int link_avail
, link_clock
;
1345 int common_rates
[DP_MAX_SUPPORTED_RATES
] = {};
1348 common_len
= intel_dp_common_rates(intel_dp
, common_rates
);
1350 /* No common link rates between source and sink */
1351 WARN_ON(common_len
<= 0);
1353 max_clock
= common_len
- 1;
1355 if (HAS_PCH_SPLIT(dev
) && !HAS_DDI(dev
) && port
!= PORT_A
)
1356 pipe_config
->has_pch_encoder
= true;
1358 pipe_config
->has_dp_encoder
= true;
1359 pipe_config
->has_drrs
= false;
1360 pipe_config
->has_audio
= intel_dp
->has_audio
&& port
!= PORT_A
;
/* eDP with a fixed panel mode: force the panel's native timings and fitter. */
1362 if (is_edp(intel_dp
) && intel_connector
->panel
.fixed_mode
) {
1363 intel_fixed_panel_mode(intel_connector
->panel
.fixed_mode
,
1365 if (!HAS_PCH_SPLIT(dev
))
1366 intel_gmch_panel_fitting(intel_crtc
, pipe_config
,
1367 intel_connector
->panel
.fitting_mode
);
1369 intel_pch_panel_fitting(intel_crtc
, pipe_config
,
1370 intel_connector
->panel
.fitting_mode
);
1373 if (adjusted_mode
->flags
& DRM_MODE_FLAG_DBLCLK
)
1376 DRM_DEBUG_KMS("DP link computation with max lane count %i "
1377 "max bw %d pixel clock %iKHz\n",
1378 max_lane_count
, common_rates
[max_clock
],
1379 adjusted_mode
->crtc_clock
);
1381 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1382 * bpc in between. */
1383 bpp
= pipe_config
->pipe_bpp
;
1384 if (is_edp(intel_dp
)) {
/* Honour the VBT's eDP bpp as an upper bound when it is smaller. */
1385 if (dev_priv
->vbt
.edp_bpp
&& dev_priv
->vbt
.edp_bpp
< bpp
) {
1386 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1387 dev_priv
->vbt
.edp_bpp
);
1388 bpp
= dev_priv
->vbt
.edp_bpp
;
1392 * Use the maximum clock and number of lanes the eDP panel
1393 * advertizes being capable of. The panels are generally
1394 * designed to support only a single clock and lane
1395 * configuration, and typically these values correspond to the
1396 * native resolution of the panel.
1398 min_lane_count
= max_lane_count
;
1399 min_clock
= max_clock
;
/* Search loop: try highest bpp first, stepping down 2 bpc at a time. */
1402 for (; bpp
>= 6*3; bpp
-= 2*3) {
1403 mode_rate
= intel_dp_link_required(adjusted_mode
->crtc_clock
,
1406 for (clock
= min_clock
; clock
<= max_clock
; clock
++) {
1407 for (lane_count
= min_lane_count
;
1408 lane_count
<= max_lane_count
;
1411 link_clock
= common_rates
[clock
];
1412 link_avail
= intel_dp_max_data_rate(link_clock
,
/* First combination with enough bandwidth wins. */
1415 if (mode_rate
<= link_avail
) {
1425 if (intel_dp
->color_range_auto
) {
1428 * CEA-861-E - 5.1 Default Encoding Parameters
1429 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1431 if (bpp
!= 18 && drm_match_cea_mode(adjusted_mode
) > 1)
1432 intel_dp
->color_range
= DP_COLOR_RANGE_16_235
;
1434 intel_dp
->color_range
= 0;
1437 if (intel_dp
->color_range
)
1438 pipe_config
->limited_color_range
= true;
1440 intel_dp
->lane_count
= lane_count
;
/* Sinks advertising a rate table use rate_select; others use the BW code. */
1442 if (intel_dp
->num_sink_rates
) {
1443 intel_dp
->link_bw
= 0;
1444 intel_dp
->rate_select
=
1445 intel_dp_rate_select(intel_dp
, common_rates
[clock
]);
1448 drm_dp_link_rate_to_bw_code(common_rates
[clock
]);
1449 intel_dp
->rate_select
= 0;
1452 pipe_config
->pipe_bpp
= bpp
;
1453 pipe_config
->port_clock
= common_rates
[clock
];
1455 DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
1456 intel_dp
->link_bw
, intel_dp
->lane_count
,
1457 pipe_config
->port_clock
, bpp
);
1458 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1459 mode_rate
, link_avail
);
1461 intel_link_compute_m_n(bpp
, lane_count
,
1462 adjusted_mode
->crtc_clock
,
1463 pipe_config
->port_clock
,
1464 &pipe_config
->dp_m_n
);
/* Seamless DRRS: also compute m/n for the panel's downclocked mode. */
1466 if (intel_connector
->panel
.downclock_mode
!= NULL
&&
1467 dev_priv
->drrs
.type
== SEAMLESS_DRRS_SUPPORT
) {
1468 pipe_config
->has_drrs
= true;
1469 intel_link_compute_m_n(bpp
, lane_count
,
1470 intel_connector
->panel
.downclock_mode
->clock
,
1471 pipe_config
->port_clock
,
1472 &pipe_config
->dp_m2_n2
);
/* Platform-specific PLL selection for the chosen link rate. */
1475 if (IS_SKYLAKE(dev
) && is_edp(intel_dp
))
1476 skl_edp_set_pll_config(pipe_config
, common_rates
[clock
]);
1477 else if (IS_HASWELL(dev
) || IS_BROADWELL(dev
))
1478 hsw_dp_set_ddi_pll_sel(pipe_config
, intel_dp
->link_bw
);
1480 intel_dp_set_clock(encoder
, pipe_config
, intel_dp
->link_bw
);
/*
 * Program the CPU eDP (DP_A) PLL frequency select bits for the crtc's
 * configured port clock: 162000 kHz uses the "160MHz" bit, anything else
 * the 270MHz bit. Mirrors the choice into intel_dp->DP.
 * NOTE(review): some original lines are elided in this extraction.
 */
1485 static void ironlake_set_pll_cpu_edp(struct intel_dp
*intel_dp
)
1487 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
1488 struct intel_crtc
*crtc
= to_intel_crtc(dig_port
->base
.base
.crtc
);
1489 struct drm_device
*dev
= crtc
->base
.dev
;
1490 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1493 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1494 crtc
->config
->port_clock
);
1495 dpa_ctl
= I915_READ(DP_A
);
1496 dpa_ctl
&= ~DP_PLL_FREQ_MASK
;
1498 if (crtc
->config
->port_clock
== 162000) {
1499 /* For a long time we've carried around a ILK-DevA w/a for the
1500 * 160MHz clock. If we're really unlucky, it's still required.
1502 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
1503 dpa_ctl
|= DP_PLL_FREQ_160MHZ
;
1504 intel_dp
->DP
|= DP_PLL_FREQ_160MHZ
;
1506 dpa_ctl
|= DP_PLL_FREQ_270MHZ
;
1507 intel_dp
->DP
|= DP_PLL_FREQ_270MHZ
;
1510 I915_WRITE(DP_A
, dpa_ctl
);
/*
 * Build the value of the DP port register (intel_dp->DP) for the current
 * crtc configuration, handling the three register layouts (CPU/IBX, CPT
 * PCH, gen7 PORT_A). The register itself is written later.
 * NOTE(review): many original lines are elided in this extraction.
 */
1516 static void intel_dp_prepare(struct intel_encoder
*encoder
)
1518 struct drm_device
*dev
= encoder
->base
.dev
;
1519 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1520 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
1521 enum port port
= dp_to_dig_port(intel_dp
)->port
;
1522 struct intel_crtc
*crtc
= to_intel_crtc(encoder
->base
.crtc
);
1523 struct drm_display_mode
*adjusted_mode
= &crtc
->config
->base
.adjusted_mode
;
1526 * There are four kinds of DP registers:
1533 * IBX PCH and CPU are the same for almost everything,
1534 * except that the CPU DP PLL is configured in this
1537 * CPT PCH is quite different, having many bits moved
1538 * to the TRANS_DP_CTL register instead. That
1539 * configuration happens (oddly) in ironlake_pch_enable
1542 /* Preserve the BIOS-computed detected bit. This is
1543 * supposed to be read-only.
1545 intel_dp
->DP
= I915_READ(intel_dp
->output_reg
) & DP_DETECTED
;
1547 /* Handle DP bits in common between all three register formats */
1548 intel_dp
->DP
|= DP_VOLTAGE_0_4
| DP_PRE_EMPHASIS_0
;
1549 intel_dp
->DP
|= DP_PORT_WIDTH(intel_dp
->lane_count
);
1551 if (crtc
->config
->has_audio
)
1552 intel_dp
->DP
|= DP_AUDIO_OUTPUT_ENABLE
;
1554 /* Split out the IBX/CPU vs CPT settings */
/* Gen7 CPU eDP (PORT_A, not VLV): CPT-style layout with pipe in bits 29+. */
1556 if (port
== PORT_A
&& IS_GEN7(dev
) && !IS_VALLEYVIEW(dev
)) {
1557 if (adjusted_mode
->flags
& DRM_MODE_FLAG_PHSYNC
)
1558 intel_dp
->DP
|= DP_SYNC_HS_HIGH
;
1559 if (adjusted_mode
->flags
& DRM_MODE_FLAG_PVSYNC
)
1560 intel_dp
->DP
|= DP_SYNC_VS_HIGH
;
1561 intel_dp
->DP
|= DP_LINK_TRAIN_OFF_CPT
;
1563 if (drm_dp_enhanced_frame_cap(intel_dp
->dpcd
))
1564 intel_dp
->DP
|= DP_ENHANCED_FRAMING
;
1566 intel_dp
->DP
|= crtc
->pipe
<< 29;
/* IBX/CPU path: sync polarity, enhanced framing and pipe select live here. */
1567 } else if (!HAS_PCH_CPT(dev
) || port
== PORT_A
) {
1568 if (!HAS_PCH_SPLIT(dev
) && !IS_VALLEYVIEW(dev
))
1569 intel_dp
->DP
|= intel_dp
->color_range
;
1571 if (adjusted_mode
->flags
& DRM_MODE_FLAG_PHSYNC
)
1572 intel_dp
->DP
|= DP_SYNC_HS_HIGH
;
1573 if (adjusted_mode
->flags
& DRM_MODE_FLAG_PVSYNC
)
1574 intel_dp
->DP
|= DP_SYNC_VS_HIGH
;
1575 intel_dp
->DP
|= DP_LINK_TRAIN_OFF
;
1577 if (drm_dp_enhanced_frame_cap(intel_dp
->dpcd
))
1578 intel_dp
->DP
|= DP_ENHANCED_FRAMING
;
1580 if (!IS_CHERRYVIEW(dev
)) {
1581 if (crtc
->pipe
== 1)
1582 intel_dp
->DP
|= DP_PIPEB_SELECT
;
1584 intel_dp
->DP
|= DP_PIPE_SELECT_CHV(crtc
->pipe
);
/* CPT PCH path: most configuration lives in TRANS_DP_CTL instead. */
1587 intel_dp
->DP
|= DP_LINK_TRAIN_OFF_CPT
;
1591 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1592 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
1594 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1595 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
1597 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1598 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
/*
 * Poll the panel power sequencer status register until
 * (status & mask) == value, timing out after 5 seconds (10ms poll
 * interval). Caller must hold pps_mutex.
 */
1600 static void wait_panel_status(struct intel_dp
*intel_dp
,
1604 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
1605 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1606 u32 pp_stat_reg
, pp_ctrl_reg
;
1608 lockdep_assert_held(&dev_priv
->pps_mutex
);
1610 pp_stat_reg
= _pp_stat_reg(intel_dp
);
1611 pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
1613 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1615 I915_READ(pp_stat_reg
),
1616 I915_READ(pp_ctrl_reg
));
/* 5000ms total timeout, polling every 10ms. */
1618 if (_wait_for((I915_READ(pp_stat_reg
) & mask
) == value
, 5000, 10)) {
1619 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1620 I915_READ(pp_stat_reg
),
1621 I915_READ(pp_ctrl_reg
));
1624 DRM_DEBUG_KMS("Wait complete\n");
1627 static void wait_panel_on(struct intel_dp
*intel_dp
)
1629 DRM_DEBUG_KMS("Wait for panel power on\n");
1630 wait_panel_status(intel_dp
, IDLE_ON_MASK
, IDLE_ON_VALUE
);
1633 static void wait_panel_off(struct intel_dp
*intel_dp
)
1635 DRM_DEBUG_KMS("Wait for panel power off time\n");
1636 wait_panel_status(intel_dp
, IDLE_OFF_MASK
, IDLE_OFF_VALUE
);
1639 static void wait_panel_power_cycle(struct intel_dp
*intel_dp
)
1641 DRM_DEBUG_KMS("Wait for panel power cycle\n");
1643 /* When we disable the VDD override bit last we have to do the manual
1645 wait_remaining_ms_from_jiffies(intel_dp
->last_power_cycle
,
1646 intel_dp
->panel_power_cycle_delay
);
1648 wait_panel_status(intel_dp
, IDLE_CYCLE_MASK
, IDLE_CYCLE_VALUE
);
1651 static void wait_backlight_on(struct intel_dp
*intel_dp
)
1653 wait_remaining_ms_from_jiffies(intel_dp
->last_power_on
,
1654 intel_dp
->backlight_on_delay
);
1657 static void edp_wait_backlight_off(struct intel_dp
*intel_dp
)
1659 wait_remaining_ms_from_jiffies(intel_dp
->last_backlight_off
,
1660 intel_dp
->backlight_off_delay
);
1663 /* Read the current pp_control value, unlocking the register if it
/*
 * Replaces the write-protect key field with PANEL_UNLOCK_REGS so that
 * subsequent writes of the returned value take effect.
 * Caller must hold pps_mutex.
 */
1667 static u32
ironlake_get_pp_control(struct intel_dp
*intel_dp
)
1669 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
1670 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1673 lockdep_assert_held(&dev_priv
->pps_mutex
);
1675 control
= I915_READ(_pp_ctrl_reg(intel_dp
));
1676 control
&= ~PANEL_UNLOCK_MASK
;
1677 control
|= PANEL_UNLOCK_REGS
;
1682 * Must be paired with edp_panel_vdd_off().
1683 * Must hold pps_mutex around the whole on/off sequence.
1684 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
/*
 * Force panel VDD on so the AUX channel can be used before full panel
 * power-up. Returns true when this call actually needed to enable VDD
 * (i.e. the caller owes a matching off).
 */
1686 static bool edp_panel_vdd_on(struct intel_dp
*intel_dp
)
1688 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
1689 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
1690 struct intel_encoder
*intel_encoder
= &intel_dig_port
->base
;
1691 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1692 enum intel_display_power_domain power_domain
;
1694 u32 pp_stat_reg
, pp_ctrl_reg
;
1695 bool need_to_disable
= !intel_dp
->want_panel_vdd
;
1697 lockdep_assert_held(&dev_priv
->pps_mutex
);
1699 if (!is_edp(intel_dp
))
/* Cancel any pending deferred VDD-off and record that we want VDD. */
1702 cancel_delayed_work(&intel_dp
->panel_vdd_work
);
1703 intel_dp
->want_panel_vdd
= true;
/* VDD already up (e.g. left on by BIOS or a previous call): nothing to do. */
1705 if (edp_have_panel_vdd(intel_dp
))
1706 return need_to_disable
;
1708 power_domain
= intel_display_port_power_domain(intel_encoder
);
1709 intel_display_power_get(dev_priv
, power_domain
);
1711 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1712 port_name(intel_dig_port
->port
));
/* Respect the mandatory power-cycle delay before re-powering the panel. */
1714 if (!edp_have_panel_power(intel_dp
))
1715 wait_panel_power_cycle(intel_dp
);
1717 pp
= ironlake_get_pp_control(intel_dp
);
1718 pp
|= EDP_FORCE_VDD
;
1720 pp_stat_reg
= _pp_stat_reg(intel_dp
);
1721 pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
1723 I915_WRITE(pp_ctrl_reg
, pp
);
1724 POSTING_READ(pp_ctrl_reg
);
1725 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1726 I915_READ(pp_stat_reg
), I915_READ(pp_ctrl_reg
));
1728 * If the panel wasn't on, delay before accessing aux channel
1730 if (!edp_have_panel_power(intel_dp
)) {
1731 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1732 port_name(intel_dig_port
->port
));
1733 msleep(intel_dp
->panel_power_up_delay
);
1736 return need_to_disable
;
1740 * Must be paired with intel_edp_panel_vdd_off() or
1741 * intel_edp_panel_off().
1742 * Nested calls to these functions are not allowed since
1743 * we drop the lock. Caller must use some higher level
1744 * locking to prevent nested calls from other threads.
/* Public wrapper: take the pps lock, force VDD on, and warn if it was
 * already requested (double-on indicates an unbalanced caller). */
1746 void intel_edp_panel_vdd_on(struct intel_dp
*intel_dp
)
1750 if (!is_edp(intel_dp
))
1754 vdd
= edp_panel_vdd_on(intel_dp
);
1755 pps_unlock(intel_dp
);
1757 I915_STATE_WARN(!vdd
, "eDP port %c VDD already requested on\n",
1758 port_name(dp_to_dig_port(intel_dp
)->port
));
/*
 * Synchronously drop the forced panel VDD and release the power domain
 * reference taken when it was enabled. Caller must hold pps_mutex and
 * must no longer want VDD (WARN below).
 */
1761 static void edp_panel_vdd_off_sync(struct intel_dp
*intel_dp
)
1763 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
1764 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1765 struct intel_digital_port
*intel_dig_port
=
1766 dp_to_dig_port(intel_dp
);
1767 struct intel_encoder
*intel_encoder
= &intel_dig_port
->base
;
1768 enum intel_display_power_domain power_domain
;
1770 u32 pp_stat_reg
, pp_ctrl_reg
;
1772 lockdep_assert_held(&dev_priv
->pps_mutex
);
1774 WARN_ON(intel_dp
->want_panel_vdd
);
1776 if (!edp_have_panel_vdd(intel_dp
))
1779 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1780 port_name(intel_dig_port
->port
));
1782 pp
= ironlake_get_pp_control(intel_dp
);
1783 pp
&= ~EDP_FORCE_VDD
;
1785 pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
1786 pp_stat_reg
= _pp_stat_reg(intel_dp
);
1788 I915_WRITE(pp_ctrl_reg
, pp
);
1789 POSTING_READ(pp_ctrl_reg
);
1791 /* Make sure sequencer is idle before allowing subsequent activity */
1792 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1793 I915_READ(pp_stat_reg
), I915_READ(pp_ctrl_reg
));
/* If panel power is also off, this counts as the start of a power cycle. */
1795 if ((pp
& POWER_TARGET_ON
) == 0)
1796 intel_dp
->last_power_cycle
= jiffies
;
1798 power_domain
= intel_display_port_power_domain(intel_encoder
);
1799 intel_display_power_put(dev_priv
, power_domain
);
/*
 * Delayed-work handler scheduled by edp_panel_vdd_schedule_off(): drop
 * VDD unless somebody requested it again in the meantime.
 */
1802 static void edp_panel_vdd_work(struct work_struct
*__work
)
1804 struct intel_dp
*intel_dp
= container_of(to_delayed_work(__work
),
1805 struct intel_dp
, panel_vdd_work
);
/* Only power down if no new VDD request arrived while we were queued. */
1808 if (!intel_dp
->want_panel_vdd
)
1809 edp_panel_vdd_off_sync(intel_dp
);
1810 pps_unlock(intel_dp
);
1813 static void edp_panel_vdd_schedule_off(struct intel_dp
*intel_dp
)
1815 unsigned long delay
;
1818 * Queue the timer to fire a long time from now (relative to the power
1819 * down delay) to keep the panel power up across a sequence of
1822 delay
= msecs_to_jiffies(intel_dp
->panel_power_cycle_delay
* 5);
1823 schedule_delayed_work(&intel_dp
->panel_vdd_work
, delay
);
1827 * Must be paired with edp_panel_vdd_on().
1828 * Must hold pps_mutex around the whole on/off sequence.
1829 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
/*
 * Release the VDD request: either drop it immediately (sync path) or
 * defer via the delayed work. NOTE(review): the `if (sync)` selector
 * line is elided in this extraction.
 */
1831 static void edp_panel_vdd_off(struct intel_dp
*intel_dp
, bool sync
)
1833 struct drm_i915_private
*dev_priv
=
1834 intel_dp_to_dev(intel_dp
)->dev_private
;
1836 lockdep_assert_held(&dev_priv
->pps_mutex
);
1838 if (!is_edp(intel_dp
))
1841 I915_STATE_WARN(!intel_dp
->want_panel_vdd
, "eDP port %c VDD not forced on",
1842 port_name(dp_to_dig_port(intel_dp
)->port
));
1844 intel_dp
->want_panel_vdd
= false;
1847 edp_panel_vdd_off_sync(intel_dp
);
1849 edp_panel_vdd_schedule_off(intel_dp
);
/*
 * Turn eDP panel power on via the power sequencer. Caller must hold
 * pps_mutex. No-op (with WARN) if power is already on.
 * NOTE(review): some original lines are elided in this extraction.
 */
1852 static void edp_panel_on(struct intel_dp
*intel_dp
)
1854 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
1855 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1859 lockdep_assert_held(&dev_priv
->pps_mutex
);
1861 if (!is_edp(intel_dp
))
1864 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1865 port_name(dp_to_dig_port(intel_dp
)->port
));
1867 if (WARN(edp_have_panel_power(intel_dp
),
1868 "eDP port %c panel power already on\n",
1869 port_name(dp_to_dig_port(intel_dp
)->port
)))
/* Honour the mandatory off-to-on power cycle delay first. */
1872 wait_panel_power_cycle(intel_dp
);
1874 pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
1875 pp
= ironlake_get_pp_control(intel_dp
);
1877 /* ILK workaround: disable reset around power sequence */
1878 pp
&= ~PANEL_POWER_RESET
;
1879 I915_WRITE(pp_ctrl_reg
, pp
);
1880 POSTING_READ(pp_ctrl_reg
);
1883 pp
|= POWER_TARGET_ON
;
1885 pp
|= PANEL_POWER_RESET
;
1887 I915_WRITE(pp_ctrl_reg
, pp
);
1888 POSTING_READ(pp_ctrl_reg
);
/* Wait for the sequencer, then record when power came up (for the
 * backlight-on delay). */
1890 wait_panel_on(intel_dp
);
1891 intel_dp
->last_power_on
= jiffies
;
1894 pp
|= PANEL_POWER_RESET
; /* restore panel reset bit */
1895 I915_WRITE(pp_ctrl_reg
, pp
);
1896 POSTING_READ(pp_ctrl_reg
);
/* Public wrapper for edp_panel_on() under the pps lock; no-op on non-eDP. */
1900 void intel_edp_panel_on(struct intel_dp
*intel_dp
)
1902 if (!is_edp(intel_dp
))
1906 edp_panel_on(intel_dp
);
1907 pps_unlock(intel_dp
);
/*
 * Turn eDP panel power off via the power sequencer. Requires VDD to be
 * forced on (WARN below) and pps_mutex held; releases the power-domain
 * reference taken when VDD was enabled.
 */
1911 static void edp_panel_off(struct intel_dp
*intel_dp
)
1913 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
1914 struct intel_encoder
*intel_encoder
= &intel_dig_port
->base
;
1915 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
1916 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1917 enum intel_display_power_domain power_domain
;
1921 lockdep_assert_held(&dev_priv
->pps_mutex
);
1923 if (!is_edp(intel_dp
))
1926 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
1927 port_name(dp_to_dig_port(intel_dp
)->port
));
1929 WARN(!intel_dp
->want_panel_vdd
, "Need eDP port %c VDD to turn off panel\n",
1930 port_name(dp_to_dig_port(intel_dp
)->port
));
1932 pp
= ironlake_get_pp_control(intel_dp
);
1933 /* We need to switch off panel power _and_ force vdd, for otherwise some
1934 * panels get very unhappy and cease to work. */
1935 pp
&= ~(POWER_TARGET_ON
| PANEL_POWER_RESET
| EDP_FORCE_VDD
|
1938 pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
1940 intel_dp
->want_panel_vdd
= false;
1942 I915_WRITE(pp_ctrl_reg
, pp
);
1943 POSTING_READ(pp_ctrl_reg
);
/* Record the start of the power cycle, then wait for full power down. */
1945 intel_dp
->last_power_cycle
= jiffies
;
1946 wait_panel_off(intel_dp
);
1948 /* We got a reference when we enabled the VDD. */
1949 power_domain
= intel_display_port_power_domain(intel_encoder
);
1950 intel_display_power_put(dev_priv
, power_domain
);
/* Public wrapper for edp_panel_off() under the pps lock; no-op on non-eDP. */
1953 void intel_edp_panel_off(struct intel_dp
*intel_dp
)
1955 if (!is_edp(intel_dp
))
1959 edp_panel_off(intel_dp
);
1960 pps_unlock(intel_dp
);
1963 /* Enable backlight in the panel power control. */
/* Sets EDP_BLC_ENABLE in the PP control register after honouring the
 * backlight-on delay. NOTE(review): some lines (locals, pps_lock) are
 * elided in this extraction. */
1964 static void _intel_edp_backlight_on(struct intel_dp
*intel_dp
)
1966 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
1967 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
1968 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1973 * If we enable the backlight right away following a panel power
1974 * on, we may see slight flicker as the panel syncs with the eDP
1975 * link. So delay a bit to make sure the image is solid before
1976 * allowing it to appear.
1978 wait_backlight_on(intel_dp
);
1982 pp
= ironlake_get_pp_control(intel_dp
);
1983 pp
|= EDP_BLC_ENABLE
;
1985 pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
1987 I915_WRITE(pp_ctrl_reg
, pp
);
1988 POSTING_READ(pp_ctrl_reg
);
1990 pps_unlock(intel_dp
);
1993 /* Enable backlight PWM and backlight PP control. */
/* Public entry: enable the PWM first, then the PP-control backlight bit. */
1994 void intel_edp_backlight_on(struct intel_dp
*intel_dp
)
1996 if (!is_edp(intel_dp
))
1999 DRM_DEBUG_KMS("\n");
2001 intel_panel_enable_backlight(intel_dp
->attached_connector
);
2002 _intel_edp_backlight_on(intel_dp
);
2005 /* Disable backlight in the panel power control. */
/* Clears EDP_BLC_ENABLE, then records the time and honours the
 * backlight-off delay before returning. */
2006 static void _intel_edp_backlight_off(struct intel_dp
*intel_dp
)
2008 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
2009 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2013 if (!is_edp(intel_dp
))
2018 pp
= ironlake_get_pp_control(intel_dp
);
2019 pp
&= ~EDP_BLC_ENABLE
;
2021 pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
2023 I915_WRITE(pp_ctrl_reg
, pp
);
2024 POSTING_READ(pp_ctrl_reg
);
2026 pps_unlock(intel_dp
);
2028 intel_dp
->last_backlight_off
= jiffies
;
2029 edp_wait_backlight_off(intel_dp
);
2032 /* Disable backlight PP control and backlight PWM. */
/* Public entry: disable the PP-control backlight bit first, then the PWM. */
2033 void intel_edp_backlight_off(struct intel_dp
*intel_dp
)
2035 if (!is_edp(intel_dp
))
2038 DRM_DEBUG_KMS("\n");
2040 _intel_edp_backlight_off(intel_dp
);
2041 intel_panel_disable_backlight(intel_dp
->attached_connector
);
2045 * Hook for controlling the panel power control backlight through the bl_power
2046 * sysfs attribute. Take care to handle multiple calls.
2048 static void intel_edp_backlight_power(struct intel_connector
*connector
,
2051 struct intel_dp
*intel_dp
= intel_attached_dp(&connector
->base
);
/* Read current state under the pps lock so repeat calls are no-ops. */
2055 is_enabled
= ironlake_get_pp_control(intel_dp
) & EDP_BLC_ENABLE
;
2056 pps_unlock(intel_dp
);
2058 if (is_enabled
== enable
)
2061 DRM_DEBUG_KMS("panel power control backlight %s\n",
2062 enable
? "enable" : "disable");
2065 _intel_edp_backlight_on(intel_dp
);
2067 _intel_edp_backlight_off(intel_dp
);
/*
 * Enable the CPU eDP PLL (DP_A). The pipe must be disabled and both the
 * PLL and the port must currently be off (WARNed below).
 */
2070 static void ironlake_edp_pll_on(struct intel_dp
*intel_dp
)
2072 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
2073 struct drm_crtc
*crtc
= intel_dig_port
->base
.base
.crtc
;
2074 struct drm_device
*dev
= crtc
->dev
;
2075 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2078 assert_pipe_disabled(dev_priv
,
2079 to_intel_crtc(crtc
)->pipe
);
2081 DRM_DEBUG_KMS("\n");
2082 dpa_ctl
= I915_READ(DP_A
);
2083 WARN(dpa_ctl
& DP_PLL_ENABLE
, "dp pll on, should be off\n");
2084 WARN(dpa_ctl
& DP_PORT_EN
, "dp port still on, should be off\n");
2086 /* We don't adjust intel_dp->DP while tearing down the link, to
2087 * facilitate link retraining (e.g. after hotplug). Hence clear all
2088 * enable bits here to ensure that we don't enable too much. */
2089 intel_dp
->DP
&= ~(DP_PORT_EN
| DP_AUDIO_OUTPUT_ENABLE
);
2090 intel_dp
->DP
|= DP_PLL_ENABLE
;
2091 I915_WRITE(DP_A
, intel_dp
->DP
);
/*
 * Disable the CPU eDP PLL (DP_A). The pipe must be disabled; the PLL is
 * expected to be on and the port off (WARNed below).
 */
2096 static void ironlake_edp_pll_off(struct intel_dp
*intel_dp
)
2098 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
2099 struct drm_crtc
*crtc
= intel_dig_port
->base
.base
.crtc
;
2100 struct drm_device
*dev
= crtc
->dev
;
2101 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2104 assert_pipe_disabled(dev_priv
,
2105 to_intel_crtc(crtc
)->pipe
);
2107 dpa_ctl
= I915_READ(DP_A
);
2108 WARN((dpa_ctl
& DP_PLL_ENABLE
) == 0,
2109 "dp pll off, should be on\n");
2110 WARN(dpa_ctl
& DP_PORT_EN
, "dp port still on, should be off\n");
2112 /* We can't rely on the value tracked for the DP register in
2113 * intel_dp->DP because link_down must not change that (otherwise link
2114 * re-training will fail. */
2115 dpa_ctl
&= ~DP_PLL_ENABLE
;
2116 I915_WRITE(DP_A
, dpa_ctl
);
2121 /* If the sink supports it, try to set the power state appropriately */
/* Writes DP_SET_POWER over AUX; sinks older than DPCD 1.1 don't support it.
 * NOTE(review): several original lines are elided in this extraction. */
2122 void intel_dp_sink_dpms(struct intel_dp
*intel_dp
, int mode
)
2126 /* Should have a valid DPCD by this point */
2127 if (intel_dp
->dpcd
[DP_DPCD_REV
] < 0x11)
2130 if (mode
!= DRM_MODE_DPMS_ON
) {
2131 ret
= drm_dp_dpcd_writeb(&intel_dp
->aux
, DP_SET_POWER
,
2135 * When turning on, we need to retry for 1ms to give the sink
/* Power-on path retries the DPCD write up to 3 times. */
2138 for (i
= 0; i
< 3; i
++) {
2139 ret
= drm_dp_dpcd_writeb(&intel_dp
->aux
, DP_SET_POWER
,
2148 DRM_DEBUG_KMS("failed to %s sink power state\n",
2149 mode
== DRM_MODE_DPMS_ON
? "enable" : "disable");
/*
 * Hardware state readout: report whether the DP port is enabled and,
 * if so, which pipe drives it. On CPT the pipe must be found by
 * scanning TRANS_DP_CTL for the matching port select.
 * NOTE(review): several original lines are elided in this extraction.
 */
2152 static bool intel_dp_get_hw_state(struct intel_encoder
*encoder
,
2155 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2156 enum port port
= dp_to_dig_port(intel_dp
)->port
;
2157 struct drm_device
*dev
= encoder
->base
.dev
;
2158 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2159 enum intel_display_power_domain power_domain
;
/* Don't touch registers if the port's power domain is down. */
2162 power_domain
= intel_display_port_power_domain(encoder
);
2163 if (!intel_display_power_is_enabled(dev_priv
, power_domain
))
2166 tmp
= I915_READ(intel_dp
->output_reg
);
2168 if (!(tmp
& DP_PORT_EN
))
/* Pipe encoding differs per platform/port; decode accordingly. */
2171 if (port
== PORT_A
&& IS_GEN7(dev
) && !IS_VALLEYVIEW(dev
)) {
2172 *pipe
= PORT_TO_PIPE_CPT(tmp
);
2173 } else if (IS_CHERRYVIEW(dev
)) {
2174 *pipe
= DP_PORT_TO_PIPE_CHV(tmp
);
2175 } else if (!HAS_PCH_CPT(dev
) || port
== PORT_A
) {
2176 *pipe
= PORT_TO_PIPE(tmp
);
/* CPT: the pipe isn't in the port register; search the transcoders. */
2182 switch (intel_dp
->output_reg
) {
2184 trans_sel
= TRANS_DP_PORT_SEL_B
;
2187 trans_sel
= TRANS_DP_PORT_SEL_C
;
2190 trans_sel
= TRANS_DP_PORT_SEL_D
;
2196 for_each_pipe(dev_priv
, i
) {
2197 trans_dp
= I915_READ(TRANS_DP_CTL(i
));
2198 if ((trans_dp
& TRANS_DP_PORT_SEL_MASK
) == trans_sel
) {
2204 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2205 intel_dp
->output_reg
);
/*
 * Hardware state readout into pipe_config: audio, sync polarity, color
 * range, m/n values, port clock and the derived dotclock.
 * NOTE(review): several original lines are elided in this extraction.
 */
2211 static void intel_dp_get_config(struct intel_encoder
*encoder
,
2212 struct intel_crtc_state
*pipe_config
)
2214 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2216 struct drm_device
*dev
= encoder
->base
.dev
;
2217 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2218 enum port port
= dp_to_dig_port(intel_dp
)->port
;
2219 struct intel_crtc
*crtc
= to_intel_crtc(encoder
->base
.crtc
);
2222 tmp
= I915_READ(intel_dp
->output_reg
);
2224 pipe_config
->has_audio
= tmp
& DP_AUDIO_OUTPUT_ENABLE
&& port
!= PORT_A
;
/* Sync polarity lives in the port register on IBX/CPU, in TRANS_DP_CTL on CPT. */
2226 if ((port
== PORT_A
) || !HAS_PCH_CPT(dev
)) {
2227 if (tmp
& DP_SYNC_HS_HIGH
)
2228 flags
|= DRM_MODE_FLAG_PHSYNC
;
2230 flags
|= DRM_MODE_FLAG_NHSYNC
;
2232 if (tmp
& DP_SYNC_VS_HIGH
)
2233 flags
|= DRM_MODE_FLAG_PVSYNC
;
2235 flags
|= DRM_MODE_FLAG_NVSYNC
;
2237 tmp
= I915_READ(TRANS_DP_CTL(crtc
->pipe
));
2238 if (tmp
& TRANS_DP_HSYNC_ACTIVE_HIGH
)
2239 flags
|= DRM_MODE_FLAG_PHSYNC
;
2241 flags
|= DRM_MODE_FLAG_NHSYNC
;
2243 if (tmp
& TRANS_DP_VSYNC_ACTIVE_HIGH
)
2244 flags
|= DRM_MODE_FLAG_PVSYNC
;
2246 flags
|= DRM_MODE_FLAG_NVSYNC
;
2249 pipe_config
->base
.adjusted_mode
.flags
|= flags
;
2251 if (!HAS_PCH_SPLIT(dev
) && !IS_VALLEYVIEW(dev
) &&
2252 tmp
& DP_COLOR_RANGE_16_235
)
2253 pipe_config
->limited_color_range
= true;
2255 pipe_config
->has_dp_encoder
= true;
2257 intel_dp_get_m_n(crtc
, pipe_config
);
/* CPU eDP: recover the port clock from the DP_A PLL frequency bits. */
2259 if (port
== PORT_A
) {
2260 if ((I915_READ(DP_A
) & DP_PLL_FREQ_MASK
) == DP_PLL_FREQ_160MHZ
)
2261 pipe_config
->port_clock
= 162000;
2263 pipe_config
->port_clock
= 270000;
2266 dotclock
= intel_dotclock_calculate(pipe_config
->port_clock
,
2267 &pipe_config
->dp_m_n
);
2269 if (HAS_PCH_SPLIT(dev_priv
->dev
) && port
!= PORT_A
)
2270 ironlake_check_encoder_dotclock(pipe_config
, dotclock
);
2272 pipe_config
->base
.adjusted_mode
.crtc_clock
= dotclock
;
2274 if (is_edp(intel_dp
) && dev_priv
->vbt
.edp_bpp
&&
2275 pipe_config
->pipe_bpp
> dev_priv
->vbt
.edp_bpp
) {
2277 * This is a big fat ugly hack.
2279 * Some machines in UEFI boot mode provide us a VBT that has 18
2280 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2281 * unknown we fail to light up. Yet the same BIOS boots up with
2282 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2283 * max, not what it tells us to use.
2285 * Note: This will still be broken if the eDP panel is not lit
2286 * up by the BIOS, and thus we can't get the mode at module
2289 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2290 pipe_config
->pipe_bpp
, dev_priv
->vbt
.edp_bpp
);
2291 dev_priv
->vbt
.edp_bpp
= pipe_config
->pipe_bpp
;
/*
 * Encoder disable hook: tear down audio/PSR, then power the panel down
 * in the required order (VDD on -> backlight off -> sink DPMS off ->
 * panel off). On gen < 5 the port must go down before the pipe.
 */
2295 static void intel_disable_dp(struct intel_encoder
*encoder
)
2297 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2298 struct drm_device
*dev
= encoder
->base
.dev
;
2299 struct intel_crtc
*crtc
= to_intel_crtc(encoder
->base
.crtc
);
2301 if (crtc
->config
->has_audio
)
2302 intel_audio_codec_disable(encoder
);
2304 if (HAS_PSR(dev
) && !HAS_DDI(dev
))
2305 intel_psr_disable(intel_dp
);
2307 /* Make sure the panel is off before trying to change the mode. But also
2308 * ensure that we have vdd while we switch off the panel. */
2309 intel_edp_panel_vdd_on(intel_dp
);
2310 intel_edp_backlight_off(intel_dp
);
2311 intel_dp_sink_dpms(intel_dp
, DRM_MODE_DPMS_OFF
);
2312 intel_edp_panel_off(intel_dp
);
2314 /* disable the port before the pipe on g4x */
2315 if (INTEL_INFO(dev
)->gen
< 5)
2316 intel_dp_link_down(intel_dp
);
/*
 * ILK post-disable: take the link down, then shut off the CPU eDP PLL.
 * NOTE(review): a conditional guarding the PLL-off call appears to be
 * elided in this extraction — confirm against the full source.
 */
2319 static void ilk_post_disable_dp(struct intel_encoder
*encoder
)
2321 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2322 enum port port
= dp_to_dig_port(intel_dp
)->port
;
2324 intel_dp_link_down(intel_dp
);
2326 ironlake_edp_pll_off(intel_dp
);
2329 static void vlv_post_disable_dp(struct intel_encoder
*encoder
)
2331 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2333 intel_dp_link_down(intel_dp
);
/*
 * CHV post-disable: after taking the link down, put the PHY data lanes
 * into reset via the DPIO sideband (both PCS01 and PCS23 lane pairs).
 */
2336 static void chv_post_disable_dp(struct intel_encoder
*encoder
)
2338 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2339 struct intel_digital_port
*dport
= dp_to_dig_port(intel_dp
);
2340 struct drm_device
*dev
= encoder
->base
.dev
;
2341 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2342 struct intel_crtc
*intel_crtc
=
2343 to_intel_crtc(encoder
->base
.crtc
);
2344 enum dpio_channel ch
= vlv_dport_to_channel(dport
);
2345 enum pipe pipe
= intel_crtc
->pipe
;
2348 intel_dp_link_down(intel_dp
);
/* All DPIO sideband accesses below must be serialized by dpio_lock. */
2350 mutex_lock(&dev_priv
->dpio_lock
);
2352 /* Propagate soft reset to data lane reset */
2353 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS01_DW1(ch
));
2354 val
|= CHV_PCS_REQ_SOFTRESET_EN
;
2355 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS01_DW1(ch
), val
);
2357 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS23_DW1(ch
));
2358 val
|= CHV_PCS_REQ_SOFTRESET_EN
;
2359 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS23_DW1(ch
), val
);
/* Assert lane reset (clear the not-in-reset bits) on both lane pairs. */
2361 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS01_DW0(ch
));
2362 val
&= ~(DPIO_PCS_TX_LANE2_RESET
| DPIO_PCS_TX_LANE1_RESET
);
2363 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS01_DW0(ch
), val
);
2365 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS23_DW0(ch
));
2366 val
&= ~(DPIO_PCS_TX_LANE2_RESET
| DPIO_PCS_TX_LANE1_RESET
);
2367 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS23_DW0(ch
), val
);
2369 mutex_unlock(&dev_priv
->dpio_lock
);
/*
 * Program the requested link-training pattern into the port register
 * value (*DP) or, on DDI platforms, directly into DP_TP_CTL. Three
 * register layouts are handled: DDI, CPT PCH, and classic IBX/CPU.
 * NOTE(review): the *DP parameter line and the tail of this function
 * are elided in this extraction.
 */
2373 _intel_dp_set_link_train(struct intel_dp
*intel_dp
,
2375 uint8_t dp_train_pat
)
2377 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
2378 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
2379 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2380 enum port port
= intel_dig_port
->port
;
/* DDI path: training pattern and scrambling live in DP_TP_CTL. */
2383 uint32_t temp
= I915_READ(DP_TP_CTL(port
));
2385 if (dp_train_pat
& DP_LINK_SCRAMBLING_DISABLE
)
2386 temp
|= DP_TP_CTL_SCRAMBLE_DISABLE
;
2388 temp
&= ~DP_TP_CTL_SCRAMBLE_DISABLE
;
2390 temp
&= ~DP_TP_CTL_LINK_TRAIN_MASK
;
2391 switch (dp_train_pat
& DP_TRAINING_PATTERN_MASK
) {
2392 case DP_TRAINING_PATTERN_DISABLE
:
2393 temp
|= DP_TP_CTL_LINK_TRAIN_NORMAL
;
2396 case DP_TRAINING_PATTERN_1
:
2397 temp
|= DP_TP_CTL_LINK_TRAIN_PAT1
;
2399 case DP_TRAINING_PATTERN_2
:
2400 temp
|= DP_TP_CTL_LINK_TRAIN_PAT2
;
2402 case DP_TRAINING_PATTERN_3
:
2403 temp
|= DP_TP_CTL_LINK_TRAIN_PAT3
;
2406 I915_WRITE(DP_TP_CTL(port
), temp
);
/* CPT path: pattern bits use the _CPT layout; pattern 3 is unsupported. */
2408 } else if (HAS_PCH_CPT(dev
) && (IS_GEN7(dev
) || port
!= PORT_A
)) {
2409 *DP
&= ~DP_LINK_TRAIN_MASK_CPT
;
2411 switch (dp_train_pat
& DP_TRAINING_PATTERN_MASK
) {
2412 case DP_TRAINING_PATTERN_DISABLE
:
2413 *DP
|= DP_LINK_TRAIN_OFF_CPT
;
2415 case DP_TRAINING_PATTERN_1
:
2416 *DP
|= DP_LINK_TRAIN_PAT_1_CPT
;
2418 case DP_TRAINING_PATTERN_2
:
2419 *DP
|= DP_LINK_TRAIN_PAT_2_CPT
;
2421 case DP_TRAINING_PATTERN_3
:
2422 DRM_ERROR("DP training pattern 3 not supported\n");
2423 *DP
|= DP_LINK_TRAIN_PAT_2_CPT
;
/* Classic path: CHV has a wider mask and supports pattern 3. */
2428 if (IS_CHERRYVIEW(dev
))
2429 *DP
&= ~DP_LINK_TRAIN_MASK_CHV
;
2431 *DP
&= ~DP_LINK_TRAIN_MASK
;
2433 switch (dp_train_pat
& DP_TRAINING_PATTERN_MASK
) {
2434 case DP_TRAINING_PATTERN_DISABLE
:
2435 *DP
|= DP_LINK_TRAIN_OFF
;
2437 case DP_TRAINING_PATTERN_1
:
2438 *DP
|= DP_LINK_TRAIN_PAT_1
;
2440 case DP_TRAINING_PATTERN_2
:
2441 *DP
|= DP_LINK_TRAIN_PAT_2
;
2443 case DP_TRAINING_PATTERN_3
:
2444 if (IS_CHERRYVIEW(dev
)) {
2445 *DP
|= DP_LINK_TRAIN_PAT_3_CHV
;
2447 DRM_ERROR("DP training pattern 3 not supported\n");
2448 *DP
|= DP_LINK_TRAIN_PAT_2
;
*intel_dp
)
2457 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
2458 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2460 /* enable with pattern 1 (as per spec) */
2461 _intel_dp_set_link_train(intel_dp
, &intel_dp
->DP
,
2462 DP_TRAINING_PATTERN_1
);
2464 I915_WRITE(intel_dp
->output_reg
, intel_dp
->DP
);
2465 POSTING_READ(intel_dp
->output_reg
);
2468 * Magic for VLV/CHV. We _must_ first set up the register
2469 * without actually enabling the port, and then do another
2470 * write to enable the port. Otherwise link training will
2471 * fail when the power sequencer is freshly used for this port.
2473 intel_dp
->DP
|= DP_PORT_EN
;
2475 I915_WRITE(intel_dp
->output_reg
, intel_dp
->DP
);
2476 POSTING_READ(intel_dp
->output_reg
);
2479 static void intel_enable_dp(struct intel_encoder
*encoder
)
2481 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2482 struct drm_device
*dev
= encoder
->base
.dev
;
2483 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2484 struct intel_crtc
*crtc
= to_intel_crtc(encoder
->base
.crtc
);
2485 uint32_t dp_reg
= I915_READ(intel_dp
->output_reg
);
2487 if (WARN_ON(dp_reg
& DP_PORT_EN
))
2492 if (IS_VALLEYVIEW(dev
))
2493 vlv_init_panel_power_sequencer(intel_dp
);
2495 intel_dp_enable_port(intel_dp
);
2497 edp_panel_vdd_on(intel_dp
);
2498 edp_panel_on(intel_dp
);
2499 edp_panel_vdd_off(intel_dp
, true);
2501 pps_unlock(intel_dp
);
2503 if (IS_VALLEYVIEW(dev
))
2504 vlv_wait_port_ready(dev_priv
, dp_to_dig_port(intel_dp
));
2506 intel_dp_sink_dpms(intel_dp
, DRM_MODE_DPMS_ON
);
2507 intel_dp_start_link_train(intel_dp
);
2508 intel_dp_complete_link_train(intel_dp
);
2509 intel_dp_stop_link_train(intel_dp
);
2511 if (crtc
->config
->has_audio
) {
2512 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2513 pipe_name(crtc
->pipe
));
2514 intel_audio_codec_enable(encoder
);
2518 static void g4x_enable_dp(struct intel_encoder
*encoder
)
2520 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2522 intel_enable_dp(encoder
);
2523 intel_edp_backlight_on(intel_dp
);
2526 static void vlv_enable_dp(struct intel_encoder
*encoder
)
2528 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2530 intel_edp_backlight_on(intel_dp
);
2531 intel_psr_enable(intel_dp
);
2534 static void g4x_pre_enable_dp(struct intel_encoder
*encoder
)
2536 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2537 struct intel_digital_port
*dport
= dp_to_dig_port(intel_dp
);
2539 intel_dp_prepare(encoder
);
2541 /* Only ilk+ has port A */
2542 if (dport
->port
== PORT_A
) {
2543 ironlake_set_pll_cpu_edp(intel_dp
);
2544 ironlake_edp_pll_on(intel_dp
);
2548 static void vlv_detach_power_sequencer(struct intel_dp
*intel_dp
)
2550 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
2551 struct drm_i915_private
*dev_priv
= intel_dig_port
->base
.base
.dev
->dev_private
;
2552 enum pipe pipe
= intel_dp
->pps_pipe
;
2553 int pp_on_reg
= VLV_PIPE_PP_ON_DELAYS(pipe
);
2555 edp_panel_vdd_off_sync(intel_dp
);
2558 * VLV seems to get confused when multiple power seqeuencers
2559 * have the same port selected (even if only one has power/vdd
2560 * enabled). The failure manifests as vlv_wait_port_ready() failing
2561 * CHV on the other hand doesn't seem to mind having the same port
2562 * selected in multiple power seqeuencers, but let's clear the
2563 * port select always when logically disconnecting a power sequencer
2566 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2567 pipe_name(pipe
), port_name(intel_dig_port
->port
));
2568 I915_WRITE(pp_on_reg
, 0);
2569 POSTING_READ(pp_on_reg
);
2571 intel_dp
->pps_pipe
= INVALID_PIPE
;
2574 static void vlv_steal_power_sequencer(struct drm_device
*dev
,
2577 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2578 struct intel_encoder
*encoder
;
2580 lockdep_assert_held(&dev_priv
->pps_mutex
);
2582 if (WARN_ON(pipe
!= PIPE_A
&& pipe
!= PIPE_B
))
2585 list_for_each_entry(encoder
, &dev
->mode_config
.encoder_list
,
2587 struct intel_dp
*intel_dp
;
2590 if (encoder
->type
!= INTEL_OUTPUT_EDP
)
2593 intel_dp
= enc_to_intel_dp(&encoder
->base
);
2594 port
= dp_to_dig_port(intel_dp
)->port
;
2596 if (intel_dp
->pps_pipe
!= pipe
)
2599 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2600 pipe_name(pipe
), port_name(port
));
2602 WARN(encoder
->connectors_active
,
2603 "stealing pipe %c power sequencer from active eDP port %c\n",
2604 pipe_name(pipe
), port_name(port
));
2606 /* make sure vdd is off before we steal it */
2607 vlv_detach_power_sequencer(intel_dp
);
2611 static void vlv_init_panel_power_sequencer(struct intel_dp
*intel_dp
)
2613 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
2614 struct intel_encoder
*encoder
= &intel_dig_port
->base
;
2615 struct drm_device
*dev
= encoder
->base
.dev
;
2616 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2617 struct intel_crtc
*crtc
= to_intel_crtc(encoder
->base
.crtc
);
2619 lockdep_assert_held(&dev_priv
->pps_mutex
);
2621 if (!is_edp(intel_dp
))
2624 if (intel_dp
->pps_pipe
== crtc
->pipe
)
2628 * If another power sequencer was being used on this
2629 * port previously make sure to turn off vdd there while
2630 * we still have control of it.
2632 if (intel_dp
->pps_pipe
!= INVALID_PIPE
)
2633 vlv_detach_power_sequencer(intel_dp
);
2636 * We may be stealing the power
2637 * sequencer from another port.
2639 vlv_steal_power_sequencer(dev
, crtc
->pipe
);
2641 /* now it's all ours */
2642 intel_dp
->pps_pipe
= crtc
->pipe
;
2644 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2645 pipe_name(intel_dp
->pps_pipe
), port_name(intel_dig_port
->port
));
2647 /* init power sequencer on this pipe and port */
2648 intel_dp_init_panel_power_sequencer(dev
, intel_dp
);
2649 intel_dp_init_panel_power_sequencer_registers(dev
, intel_dp
);
2652 static void vlv_pre_enable_dp(struct intel_encoder
*encoder
)
2654 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2655 struct intel_digital_port
*dport
= dp_to_dig_port(intel_dp
);
2656 struct drm_device
*dev
= encoder
->base
.dev
;
2657 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2658 struct intel_crtc
*intel_crtc
= to_intel_crtc(encoder
->base
.crtc
);
2659 enum dpio_channel port
= vlv_dport_to_channel(dport
);
2660 int pipe
= intel_crtc
->pipe
;
2663 mutex_lock(&dev_priv
->dpio_lock
);
2665 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS01_DW8(port
));
2672 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS_DW8(port
), val
);
2673 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS_DW14(port
), 0x00760018);
2674 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS_DW23(port
), 0x00400888);
2676 mutex_unlock(&dev_priv
->dpio_lock
);
2678 intel_enable_dp(encoder
);
2681 static void vlv_dp_pre_pll_enable(struct intel_encoder
*encoder
)
2683 struct intel_digital_port
*dport
= enc_to_dig_port(&encoder
->base
);
2684 struct drm_device
*dev
= encoder
->base
.dev
;
2685 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2686 struct intel_crtc
*intel_crtc
=
2687 to_intel_crtc(encoder
->base
.crtc
);
2688 enum dpio_channel port
= vlv_dport_to_channel(dport
);
2689 int pipe
= intel_crtc
->pipe
;
2691 intel_dp_prepare(encoder
);
2693 /* Program Tx lane resets to default */
2694 mutex_lock(&dev_priv
->dpio_lock
);
2695 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS_DW0(port
),
2696 DPIO_PCS_TX_LANE2_RESET
|
2697 DPIO_PCS_TX_LANE1_RESET
);
2698 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS_DW1(port
),
2699 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN
|
2700 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN
|
2701 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT
) |
2702 DPIO_PCS_CLK_SOFT_RESET
);
2704 /* Fix up inter-pair skew failure */
2705 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS_DW12(port
), 0x00750f00);
2706 vlv_dpio_write(dev_priv
, pipe
, VLV_TX_DW11(port
), 0x00001500);
2707 vlv_dpio_write(dev_priv
, pipe
, VLV_TX_DW14(port
), 0x40400000);
2708 mutex_unlock(&dev_priv
->dpio_lock
);
2711 static void chv_pre_enable_dp(struct intel_encoder
*encoder
)
2713 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2714 struct intel_digital_port
*dport
= dp_to_dig_port(intel_dp
);
2715 struct drm_device
*dev
= encoder
->base
.dev
;
2716 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2717 struct intel_crtc
*intel_crtc
=
2718 to_intel_crtc(encoder
->base
.crtc
);
2719 enum dpio_channel ch
= vlv_dport_to_channel(dport
);
2720 int pipe
= intel_crtc
->pipe
;
2724 mutex_lock(&dev_priv
->dpio_lock
);
2726 /* allow hardware to manage TX FIFO reset source */
2727 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS01_DW11(ch
));
2728 val
&= ~DPIO_LANEDESKEW_STRAP_OVRD
;
2729 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS01_DW11(ch
), val
);
2731 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS23_DW11(ch
));
2732 val
&= ~DPIO_LANEDESKEW_STRAP_OVRD
;
2733 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS23_DW11(ch
), val
);
2735 /* Deassert soft data lane reset*/
2736 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS01_DW1(ch
));
2737 val
|= CHV_PCS_REQ_SOFTRESET_EN
;
2738 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS01_DW1(ch
), val
);
2740 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS23_DW1(ch
));
2741 val
|= CHV_PCS_REQ_SOFTRESET_EN
;
2742 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS23_DW1(ch
), val
);
2744 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS01_DW0(ch
));
2745 val
|= (DPIO_PCS_TX_LANE2_RESET
| DPIO_PCS_TX_LANE1_RESET
);
2746 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS01_DW0(ch
), val
);
2748 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS23_DW0(ch
));
2749 val
|= (DPIO_PCS_TX_LANE2_RESET
| DPIO_PCS_TX_LANE1_RESET
);
2750 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS23_DW0(ch
), val
);
2752 /* Program Tx lane latency optimal setting*/
2753 for (i
= 0; i
< 4; i
++) {
2754 /* Set the upar bit */
2755 data
= (i
== 1) ? 0x0 : 0x1;
2756 vlv_dpio_write(dev_priv
, pipe
, CHV_TX_DW14(ch
, i
),
2757 data
<< DPIO_UPAR_SHIFT
);
2760 /* Data lane stagger programming */
2761 /* FIXME: Fix up value only after power analysis */
2763 mutex_unlock(&dev_priv
->dpio_lock
);
2765 intel_enable_dp(encoder
);
2768 static void chv_dp_pre_pll_enable(struct intel_encoder
*encoder
)
2770 struct intel_digital_port
*dport
= enc_to_dig_port(&encoder
->base
);
2771 struct drm_device
*dev
= encoder
->base
.dev
;
2772 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2773 struct intel_crtc
*intel_crtc
=
2774 to_intel_crtc(encoder
->base
.crtc
);
2775 enum dpio_channel ch
= vlv_dport_to_channel(dport
);
2776 enum pipe pipe
= intel_crtc
->pipe
;
2779 intel_dp_prepare(encoder
);
2781 mutex_lock(&dev_priv
->dpio_lock
);
2783 /* program left/right clock distribution */
2784 if (pipe
!= PIPE_B
) {
2785 val
= vlv_dpio_read(dev_priv
, pipe
, _CHV_CMN_DW5_CH0
);
2786 val
&= ~(CHV_BUFLEFTENA1_MASK
| CHV_BUFRIGHTENA1_MASK
);
2788 val
|= CHV_BUFLEFTENA1_FORCE
;
2790 val
|= CHV_BUFRIGHTENA1_FORCE
;
2791 vlv_dpio_write(dev_priv
, pipe
, _CHV_CMN_DW5_CH0
, val
);
2793 val
= vlv_dpio_read(dev_priv
, pipe
, _CHV_CMN_DW1_CH1
);
2794 val
&= ~(CHV_BUFLEFTENA2_MASK
| CHV_BUFRIGHTENA2_MASK
);
2796 val
|= CHV_BUFLEFTENA2_FORCE
;
2798 val
|= CHV_BUFRIGHTENA2_FORCE
;
2799 vlv_dpio_write(dev_priv
, pipe
, _CHV_CMN_DW1_CH1
, val
);
2802 /* program clock channel usage */
2803 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS01_DW8(ch
));
2804 val
|= CHV_PCS_USEDCLKCHANNEL_OVRRIDE
;
2806 val
&= ~CHV_PCS_USEDCLKCHANNEL
;
2808 val
|= CHV_PCS_USEDCLKCHANNEL
;
2809 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS01_DW8(ch
), val
);
2811 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS23_DW8(ch
));
2812 val
|= CHV_PCS_USEDCLKCHANNEL_OVRRIDE
;
2814 val
&= ~CHV_PCS_USEDCLKCHANNEL
;
2816 val
|= CHV_PCS_USEDCLKCHANNEL
;
2817 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS23_DW8(ch
), val
);
2820 * This a a bit weird since generally CL
2821 * matches the pipe, but here we need to
2822 * pick the CL based on the port.
2824 val
= vlv_dpio_read(dev_priv
, pipe
, CHV_CMN_DW19(ch
));
2826 val
&= ~CHV_CMN_USEDCLKCHANNEL
;
2828 val
|= CHV_CMN_USEDCLKCHANNEL
;
2829 vlv_dpio_write(dev_priv
, pipe
, CHV_CMN_DW19(ch
), val
);
2831 mutex_unlock(&dev_priv
->dpio_lock
);
2835 * Native read with retry for link status and receiver capability reads for
2836 * cases where the sink may still be asleep.
2838 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2839 * supposed to retry 3 times per the spec.
2842 intel_dp_dpcd_read_wake(struct drm_dp_aux
*aux
, unsigned int offset
,
2843 void *buffer
, size_t size
)
2849 * Sometime we just get the same incorrect byte repeated
2850 * over the entire buffer. Doing just one throw away read
2851 * initially seems to "solve" it.
2853 drm_dp_dpcd_read(aux
, DP_DPCD_REV
, buffer
, 1);
2855 for (i
= 0; i
< 3; i
++) {
2856 ret
= drm_dp_dpcd_read(aux
, offset
, buffer
, size
);
2866 * Fetch AUX CH registers 0x202 - 0x207 which contain
2867 * link status information
2870 intel_dp_get_link_status(struct intel_dp
*intel_dp
, uint8_t link_status
[DP_LINK_STATUS_SIZE
])
2872 return intel_dp_dpcd_read_wake(&intel_dp
->aux
,
2875 DP_LINK_STATUS_SIZE
) == DP_LINK_STATUS_SIZE
;
2878 /* These are source-specific values. */
2880 intel_dp_voltage_max(struct intel_dp
*intel_dp
)
2882 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
2883 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2884 enum port port
= dp_to_dig_port(intel_dp
)->port
;
2886 if (INTEL_INFO(dev
)->gen
>= 9) {
2887 if (dev_priv
->vbt
.edp_low_vswing
&& port
== PORT_A
)
2888 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3
;
2889 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2
;
2890 } else if (IS_VALLEYVIEW(dev
))
2891 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3
;
2892 else if (IS_GEN7(dev
) && port
== PORT_A
)
2893 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2
;
2894 else if (HAS_PCH_CPT(dev
) && port
!= PORT_A
)
2895 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3
;
2897 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2
;
2901 intel_dp_pre_emphasis_max(struct intel_dp
*intel_dp
, uint8_t voltage_swing
)
2903 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
2904 enum port port
= dp_to_dig_port(intel_dp
)->port
;
2906 if (INTEL_INFO(dev
)->gen
>= 9) {
2907 switch (voltage_swing
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
2908 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
2909 return DP_TRAIN_PRE_EMPH_LEVEL_3
;
2910 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
2911 return DP_TRAIN_PRE_EMPH_LEVEL_2
;
2912 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
2913 return DP_TRAIN_PRE_EMPH_LEVEL_1
;
2914 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3
:
2915 return DP_TRAIN_PRE_EMPH_LEVEL_0
;
2917 return DP_TRAIN_PRE_EMPH_LEVEL_0
;
2919 } else if (IS_HASWELL(dev
) || IS_BROADWELL(dev
)) {
2920 switch (voltage_swing
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
2921 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
2922 return DP_TRAIN_PRE_EMPH_LEVEL_3
;
2923 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
2924 return DP_TRAIN_PRE_EMPH_LEVEL_2
;
2925 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
2926 return DP_TRAIN_PRE_EMPH_LEVEL_1
;
2927 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3
:
2929 return DP_TRAIN_PRE_EMPH_LEVEL_0
;
2931 } else if (IS_VALLEYVIEW(dev
)) {
2932 switch (voltage_swing
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
2933 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
2934 return DP_TRAIN_PRE_EMPH_LEVEL_3
;
2935 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
2936 return DP_TRAIN_PRE_EMPH_LEVEL_2
;
2937 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
2938 return DP_TRAIN_PRE_EMPH_LEVEL_1
;
2939 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3
:
2941 return DP_TRAIN_PRE_EMPH_LEVEL_0
;
2943 } else if (IS_GEN7(dev
) && port
== PORT_A
) {
2944 switch (voltage_swing
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
2945 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
2946 return DP_TRAIN_PRE_EMPH_LEVEL_2
;
2947 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
2948 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
2949 return DP_TRAIN_PRE_EMPH_LEVEL_1
;
2951 return DP_TRAIN_PRE_EMPH_LEVEL_0
;
2954 switch (voltage_swing
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
2955 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
2956 return DP_TRAIN_PRE_EMPH_LEVEL_2
;
2957 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
2958 return DP_TRAIN_PRE_EMPH_LEVEL_2
;
2959 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
2960 return DP_TRAIN_PRE_EMPH_LEVEL_1
;
2961 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3
:
2963 return DP_TRAIN_PRE_EMPH_LEVEL_0
;
2968 static uint32_t intel_vlv_signal_levels(struct intel_dp
*intel_dp
)
2970 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
2971 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2972 struct intel_digital_port
*dport
= dp_to_dig_port(intel_dp
);
2973 struct intel_crtc
*intel_crtc
=
2974 to_intel_crtc(dport
->base
.base
.crtc
);
2975 unsigned long demph_reg_value
, preemph_reg_value
,
2976 uniqtranscale_reg_value
;
2977 uint8_t train_set
= intel_dp
->train_set
[0];
2978 enum dpio_channel port
= vlv_dport_to_channel(dport
);
2979 int pipe
= intel_crtc
->pipe
;
2981 switch (train_set
& DP_TRAIN_PRE_EMPHASIS_MASK
) {
2982 case DP_TRAIN_PRE_EMPH_LEVEL_0
:
2983 preemph_reg_value
= 0x0004000;
2984 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
2985 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
2986 demph_reg_value
= 0x2B405555;
2987 uniqtranscale_reg_value
= 0x552AB83A;
2989 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
2990 demph_reg_value
= 0x2B404040;
2991 uniqtranscale_reg_value
= 0x5548B83A;
2993 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
2994 demph_reg_value
= 0x2B245555;
2995 uniqtranscale_reg_value
= 0x5560B83A;
2997 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3
:
2998 demph_reg_value
= 0x2B405555;
2999 uniqtranscale_reg_value
= 0x5598DA3A;
3005 case DP_TRAIN_PRE_EMPH_LEVEL_1
:
3006 preemph_reg_value
= 0x0002000;
3007 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3008 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3009 demph_reg_value
= 0x2B404040;
3010 uniqtranscale_reg_value
= 0x5552B83A;
3012 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3013 demph_reg_value
= 0x2B404848;
3014 uniqtranscale_reg_value
= 0x5580B83A;
3016 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
3017 demph_reg_value
= 0x2B404040;
3018 uniqtranscale_reg_value
= 0x55ADDA3A;
3024 case DP_TRAIN_PRE_EMPH_LEVEL_2
:
3025 preemph_reg_value
= 0x0000000;
3026 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3027 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3028 demph_reg_value
= 0x2B305555;
3029 uniqtranscale_reg_value
= 0x5570B83A;
3031 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3032 demph_reg_value
= 0x2B2B4040;
3033 uniqtranscale_reg_value
= 0x55ADDA3A;
3039 case DP_TRAIN_PRE_EMPH_LEVEL_3
:
3040 preemph_reg_value
= 0x0006000;
3041 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3042 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3043 demph_reg_value
= 0x1B405555;
3044 uniqtranscale_reg_value
= 0x55ADDA3A;
3054 mutex_lock(&dev_priv
->dpio_lock
);
3055 vlv_dpio_write(dev_priv
, pipe
, VLV_TX_DW5(port
), 0x00000000);
3056 vlv_dpio_write(dev_priv
, pipe
, VLV_TX_DW4(port
), demph_reg_value
);
3057 vlv_dpio_write(dev_priv
, pipe
, VLV_TX_DW2(port
),
3058 uniqtranscale_reg_value
);
3059 vlv_dpio_write(dev_priv
, pipe
, VLV_TX_DW3(port
), 0x0C782040);
3060 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS_DW11(port
), 0x00030000);
3061 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS_DW9(port
), preemph_reg_value
);
3062 vlv_dpio_write(dev_priv
, pipe
, VLV_TX_DW5(port
), 0x80000000);
3063 mutex_unlock(&dev_priv
->dpio_lock
);
3068 static uint32_t intel_chv_signal_levels(struct intel_dp
*intel_dp
)
3070 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
3071 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3072 struct intel_digital_port
*dport
= dp_to_dig_port(intel_dp
);
3073 struct intel_crtc
*intel_crtc
= to_intel_crtc(dport
->base
.base
.crtc
);
3074 u32 deemph_reg_value
, margin_reg_value
, val
;
3075 uint8_t train_set
= intel_dp
->train_set
[0];
3076 enum dpio_channel ch
= vlv_dport_to_channel(dport
);
3077 enum pipe pipe
= intel_crtc
->pipe
;
3080 switch (train_set
& DP_TRAIN_PRE_EMPHASIS_MASK
) {
3081 case DP_TRAIN_PRE_EMPH_LEVEL_0
:
3082 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3083 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3084 deemph_reg_value
= 128;
3085 margin_reg_value
= 52;
3087 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3088 deemph_reg_value
= 128;
3089 margin_reg_value
= 77;
3091 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
3092 deemph_reg_value
= 128;
3093 margin_reg_value
= 102;
3095 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3
:
3096 deemph_reg_value
= 128;
3097 margin_reg_value
= 154;
3098 /* FIXME extra to set for 1200 */
3104 case DP_TRAIN_PRE_EMPH_LEVEL_1
:
3105 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3106 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3107 deemph_reg_value
= 85;
3108 margin_reg_value
= 78;
3110 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3111 deemph_reg_value
= 85;
3112 margin_reg_value
= 116;
3114 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
3115 deemph_reg_value
= 85;
3116 margin_reg_value
= 154;
3122 case DP_TRAIN_PRE_EMPH_LEVEL_2
:
3123 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3124 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3125 deemph_reg_value
= 64;
3126 margin_reg_value
= 104;
3128 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3129 deemph_reg_value
= 64;
3130 margin_reg_value
= 154;
3136 case DP_TRAIN_PRE_EMPH_LEVEL_3
:
3137 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3138 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3139 deemph_reg_value
= 43;
3140 margin_reg_value
= 154;
3150 mutex_lock(&dev_priv
->dpio_lock
);
3152 /* Clear calc init */
3153 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS01_DW10(ch
));
3154 val
&= ~(DPIO_PCS_SWING_CALC_TX0_TX2
| DPIO_PCS_SWING_CALC_TX1_TX3
);
3155 val
&= ~(DPIO_PCS_TX1DEEMP_MASK
| DPIO_PCS_TX2DEEMP_MASK
);
3156 val
|= DPIO_PCS_TX1DEEMP_9P5
| DPIO_PCS_TX2DEEMP_9P5
;
3157 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS01_DW10(ch
), val
);
3159 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS23_DW10(ch
));
3160 val
&= ~(DPIO_PCS_SWING_CALC_TX0_TX2
| DPIO_PCS_SWING_CALC_TX1_TX3
);
3161 val
&= ~(DPIO_PCS_TX1DEEMP_MASK
| DPIO_PCS_TX2DEEMP_MASK
);
3162 val
|= DPIO_PCS_TX1DEEMP_9P5
| DPIO_PCS_TX2DEEMP_9P5
;
3163 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS23_DW10(ch
), val
);
3165 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS01_DW9(ch
));
3166 val
&= ~(DPIO_PCS_TX1MARGIN_MASK
| DPIO_PCS_TX2MARGIN_MASK
);
3167 val
|= DPIO_PCS_TX1MARGIN_000
| DPIO_PCS_TX2MARGIN_000
;
3168 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS01_DW9(ch
), val
);
3170 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS23_DW9(ch
));
3171 val
&= ~(DPIO_PCS_TX1MARGIN_MASK
| DPIO_PCS_TX2MARGIN_MASK
);
3172 val
|= DPIO_PCS_TX1MARGIN_000
| DPIO_PCS_TX2MARGIN_000
;
3173 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS23_DW9(ch
), val
);
3175 /* Program swing deemph */
3176 for (i
= 0; i
< 4; i
++) {
3177 val
= vlv_dpio_read(dev_priv
, pipe
, CHV_TX_DW4(ch
, i
));
3178 val
&= ~DPIO_SWING_DEEMPH9P5_MASK
;
3179 val
|= deemph_reg_value
<< DPIO_SWING_DEEMPH9P5_SHIFT
;
3180 vlv_dpio_write(dev_priv
, pipe
, CHV_TX_DW4(ch
, i
), val
);
3183 /* Program swing margin */
3184 for (i
= 0; i
< 4; i
++) {
3185 val
= vlv_dpio_read(dev_priv
, pipe
, CHV_TX_DW2(ch
, i
));
3186 val
&= ~DPIO_SWING_MARGIN000_MASK
;
3187 val
|= margin_reg_value
<< DPIO_SWING_MARGIN000_SHIFT
;
3188 vlv_dpio_write(dev_priv
, pipe
, CHV_TX_DW2(ch
, i
), val
);
3191 /* Disable unique transition scale */
3192 for (i
= 0; i
< 4; i
++) {
3193 val
= vlv_dpio_read(dev_priv
, pipe
, CHV_TX_DW3(ch
, i
));
3194 val
&= ~DPIO_TX_UNIQ_TRANS_SCALE_EN
;
3195 vlv_dpio_write(dev_priv
, pipe
, CHV_TX_DW3(ch
, i
), val
);
3198 if (((train_set
& DP_TRAIN_PRE_EMPHASIS_MASK
)
3199 == DP_TRAIN_PRE_EMPH_LEVEL_0
) &&
3200 ((train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
)
3201 == DP_TRAIN_VOLTAGE_SWING_LEVEL_3
)) {
3204 * The document said it needs to set bit 27 for ch0 and bit 26
3205 * for ch1. Might be a typo in the doc.
3206 * For now, for this unique transition scale selection, set bit
3207 * 27 for ch0 and ch1.
3209 for (i
= 0; i
< 4; i
++) {
3210 val
= vlv_dpio_read(dev_priv
, pipe
, CHV_TX_DW3(ch
, i
));
3211 val
|= DPIO_TX_UNIQ_TRANS_SCALE_EN
;
3212 vlv_dpio_write(dev_priv
, pipe
, CHV_TX_DW3(ch
, i
), val
);
3215 for (i
= 0; i
< 4; i
++) {
3216 val
= vlv_dpio_read(dev_priv
, pipe
, CHV_TX_DW2(ch
, i
));
3217 val
&= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT
);
3218 val
|= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT
);
3219 vlv_dpio_write(dev_priv
, pipe
, CHV_TX_DW2(ch
, i
), val
);
3223 /* Start swing calculation */
3224 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS01_DW10(ch
));
3225 val
|= DPIO_PCS_SWING_CALC_TX0_TX2
| DPIO_PCS_SWING_CALC_TX1_TX3
;
3226 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS01_DW10(ch
), val
);
3228 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS23_DW10(ch
));
3229 val
|= DPIO_PCS_SWING_CALC_TX0_TX2
| DPIO_PCS_SWING_CALC_TX1_TX3
;
3230 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS23_DW10(ch
), val
);
3233 val
= vlv_dpio_read(dev_priv
, pipe
, CHV_CMN_DW30
);
3234 val
|= DPIO_LRC_BYPASS
;
3235 vlv_dpio_write(dev_priv
, pipe
, CHV_CMN_DW30
, val
);
3237 mutex_unlock(&dev_priv
->dpio_lock
);
3243 intel_get_adjust_train(struct intel_dp
*intel_dp
,
3244 const uint8_t link_status
[DP_LINK_STATUS_SIZE
])
3249 uint8_t voltage_max
;
3250 uint8_t preemph_max
;
3252 for (lane
= 0; lane
< intel_dp
->lane_count
; lane
++) {
3253 uint8_t this_v
= drm_dp_get_adjust_request_voltage(link_status
, lane
);
3254 uint8_t this_p
= drm_dp_get_adjust_request_pre_emphasis(link_status
, lane
);
3262 voltage_max
= intel_dp_voltage_max(intel_dp
);
3263 if (v
>= voltage_max
)
3264 v
= voltage_max
| DP_TRAIN_MAX_SWING_REACHED
;
3266 preemph_max
= intel_dp_pre_emphasis_max(intel_dp
, v
);
3267 if (p
>= preemph_max
)
3268 p
= preemph_max
| DP_TRAIN_MAX_PRE_EMPHASIS_REACHED
;
3270 for (lane
= 0; lane
< 4; lane
++)
3271 intel_dp
->train_set
[lane
] = v
| p
;
3275 intel_gen4_signal_levels(uint8_t train_set
)
3277 uint32_t signal_levels
= 0;
3279 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3280 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3282 signal_levels
|= DP_VOLTAGE_0_4
;
3284 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3285 signal_levels
|= DP_VOLTAGE_0_6
;
3287 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
3288 signal_levels
|= DP_VOLTAGE_0_8
;
3290 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3
:
3291 signal_levels
|= DP_VOLTAGE_1_2
;
3294 switch (train_set
& DP_TRAIN_PRE_EMPHASIS_MASK
) {
3295 case DP_TRAIN_PRE_EMPH_LEVEL_0
:
3297 signal_levels
|= DP_PRE_EMPHASIS_0
;
3299 case DP_TRAIN_PRE_EMPH_LEVEL_1
:
3300 signal_levels
|= DP_PRE_EMPHASIS_3_5
;
3302 case DP_TRAIN_PRE_EMPH_LEVEL_2
:
3303 signal_levels
|= DP_PRE_EMPHASIS_6
;
3305 case DP_TRAIN_PRE_EMPH_LEVEL_3
:
3306 signal_levels
|= DP_PRE_EMPHASIS_9_5
;
3309 return signal_levels
;
3312 /* Gen6's DP voltage swing and pre-emphasis control */
3314 intel_gen6_edp_signal_levels(uint8_t train_set
)
3316 int signal_levels
= train_set
& (DP_TRAIN_VOLTAGE_SWING_MASK
|
3317 DP_TRAIN_PRE_EMPHASIS_MASK
);
3318 switch (signal_levels
) {
3319 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3320 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3321 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B
;
3322 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
| DP_TRAIN_PRE_EMPH_LEVEL_1
:
3323 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B
;
3324 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
| DP_TRAIN_PRE_EMPH_LEVEL_2
:
3325 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
| DP_TRAIN_PRE_EMPH_LEVEL_2
:
3326 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B
;
3327 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
| DP_TRAIN_PRE_EMPH_LEVEL_1
:
3328 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
| DP_TRAIN_PRE_EMPH_LEVEL_1
:
3329 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B
;
3330 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3331 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3332 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B
;
3334 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3335 "0x%x\n", signal_levels
);
3336 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B
;
3340 /* Gen7's DP voltage swing and pre-emphasis control */
3342 intel_gen7_edp_signal_levels(uint8_t train_set
)
3344 int signal_levels
= train_set
& (DP_TRAIN_VOLTAGE_SWING_MASK
|
3345 DP_TRAIN_PRE_EMPHASIS_MASK
);
3346 switch (signal_levels
) {
3347 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3348 return EDP_LINK_TRAIN_400MV_0DB_IVB
;
3349 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
| DP_TRAIN_PRE_EMPH_LEVEL_1
:
3350 return EDP_LINK_TRAIN_400MV_3_5DB_IVB
;
3351 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
| DP_TRAIN_PRE_EMPH_LEVEL_2
:
3352 return EDP_LINK_TRAIN_400MV_6DB_IVB
;
3354 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3355 return EDP_LINK_TRAIN_600MV_0DB_IVB
;
3356 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
| DP_TRAIN_PRE_EMPH_LEVEL_1
:
3357 return EDP_LINK_TRAIN_600MV_3_5DB_IVB
;
3359 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3360 return EDP_LINK_TRAIN_800MV_0DB_IVB
;
3361 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
| DP_TRAIN_PRE_EMPH_LEVEL_1
:
3362 return EDP_LINK_TRAIN_800MV_3_5DB_IVB
;
3365 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3366 "0x%x\n", signal_levels
);
3367 return EDP_LINK_TRAIN_500MV_0DB_IVB
;
3371 /* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
3373 intel_hsw_signal_levels(uint8_t train_set
)
3375 int signal_levels
= train_set
& (DP_TRAIN_VOLTAGE_SWING_MASK
|
3376 DP_TRAIN_PRE_EMPHASIS_MASK
);
3377 switch (signal_levels
) {
3378 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3379 return DDI_BUF_TRANS_SELECT(0);
3380 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
| DP_TRAIN_PRE_EMPH_LEVEL_1
:
3381 return DDI_BUF_TRANS_SELECT(1);
3382 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
| DP_TRAIN_PRE_EMPH_LEVEL_2
:
3383 return DDI_BUF_TRANS_SELECT(2);
3384 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
| DP_TRAIN_PRE_EMPH_LEVEL_3
:
3385 return DDI_BUF_TRANS_SELECT(3);
3387 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3388 return DDI_BUF_TRANS_SELECT(4);
3389 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
| DP_TRAIN_PRE_EMPH_LEVEL_1
:
3390 return DDI_BUF_TRANS_SELECT(5);
3391 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
| DP_TRAIN_PRE_EMPH_LEVEL_2
:
3392 return DDI_BUF_TRANS_SELECT(6);
3394 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3395 return DDI_BUF_TRANS_SELECT(7);
3396 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
| DP_TRAIN_PRE_EMPH_LEVEL_1
:
3397 return DDI_BUF_TRANS_SELECT(8);
3399 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3400 return DDI_BUF_TRANS_SELECT(9);
3402 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3403 "0x%x\n", signal_levels
);
3404 return DDI_BUF_TRANS_SELECT(0);
3408 /* Properly updates "DP" with the correct signal levels. */
3410 intel_dp_set_signal_levels(struct intel_dp
*intel_dp
, uint32_t *DP
)
3412 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
3413 enum port port
= intel_dig_port
->port
;
3414 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
3415 uint32_t signal_levels
, mask
;
3416 uint8_t train_set
= intel_dp
->train_set
[0];
3418 if (IS_HASWELL(dev
) || IS_BROADWELL(dev
) || INTEL_INFO(dev
)->gen
>= 9) {
3419 signal_levels
= intel_hsw_signal_levels(train_set
);
3420 mask
= DDI_BUF_EMP_MASK
;
3421 } else if (IS_CHERRYVIEW(dev
)) {
3422 signal_levels
= intel_chv_signal_levels(intel_dp
);
3424 } else if (IS_VALLEYVIEW(dev
)) {
3425 signal_levels
= intel_vlv_signal_levels(intel_dp
);
3427 } else if (IS_GEN7(dev
) && port
== PORT_A
) {
3428 signal_levels
= intel_gen7_edp_signal_levels(train_set
);
3429 mask
= EDP_LINK_TRAIN_VOL_EMP_MASK_IVB
;
3430 } else if (IS_GEN6(dev
) && port
== PORT_A
) {
3431 signal_levels
= intel_gen6_edp_signal_levels(train_set
);
3432 mask
= EDP_LINK_TRAIN_VOL_EMP_MASK_SNB
;
3434 signal_levels
= intel_gen4_signal_levels(train_set
);
3435 mask
= DP_VOLTAGE_MASK
| DP_PRE_EMPHASIS_MASK
;
3438 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels
);
3440 *DP
= (*DP
& ~mask
) | signal_levels
;
3444 intel_dp_set_link_train(struct intel_dp
*intel_dp
,
3446 uint8_t dp_train_pat
)
3448 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
3449 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
3450 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3451 uint8_t buf
[sizeof(intel_dp
->train_set
) + 1];
3454 _intel_dp_set_link_train(intel_dp
, DP
, dp_train_pat
);
3456 I915_WRITE(intel_dp
->output_reg
, *DP
);
3457 POSTING_READ(intel_dp
->output_reg
);
3459 buf
[0] = dp_train_pat
;
3460 if ((dp_train_pat
& DP_TRAINING_PATTERN_MASK
) ==
3461 DP_TRAINING_PATTERN_DISABLE
) {
3462 /* don't write DP_TRAINING_LANEx_SET on disable */
3465 /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
3466 memcpy(buf
+ 1, intel_dp
->train_set
, intel_dp
->lane_count
);
3467 len
= intel_dp
->lane_count
+ 1;
3470 ret
= drm_dp_dpcd_write(&intel_dp
->aux
, DP_TRAINING_PATTERN_SET
,
3477 intel_dp_reset_link_train(struct intel_dp
*intel_dp
, uint32_t *DP
,
3478 uint8_t dp_train_pat
)
3480 memset(intel_dp
->train_set
, 0, sizeof(intel_dp
->train_set
));
3481 intel_dp_set_signal_levels(intel_dp
, DP
);
3482 return intel_dp_set_link_train(intel_dp
, DP
, dp_train_pat
);
3486 intel_dp_update_link_train(struct intel_dp
*intel_dp
, uint32_t *DP
,
3487 const uint8_t link_status
[DP_LINK_STATUS_SIZE
])
3489 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
3490 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
3491 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3494 intel_get_adjust_train(intel_dp
, link_status
);
3495 intel_dp_set_signal_levels(intel_dp
, DP
);
3497 I915_WRITE(intel_dp
->output_reg
, *DP
);
3498 POSTING_READ(intel_dp
->output_reg
);
3500 ret
= drm_dp_dpcd_write(&intel_dp
->aux
, DP_TRAINING_LANE0_SET
,
3501 intel_dp
->train_set
, intel_dp
->lane_count
);
3503 return ret
== intel_dp
->lane_count
;
3506 static void intel_dp_set_idle_link_train(struct intel_dp
*intel_dp
)
3508 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
3509 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
3510 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3511 enum port port
= intel_dig_port
->port
;
3517 val
= I915_READ(DP_TP_CTL(port
));
3518 val
&= ~DP_TP_CTL_LINK_TRAIN_MASK
;
3519 val
|= DP_TP_CTL_LINK_TRAIN_IDLE
;
3520 I915_WRITE(DP_TP_CTL(port
), val
);
3523 * On PORT_A we can have only eDP in SST mode. There the only reason
3524 * we need to set idle transmission mode is to work around a HW issue
3525 * where we enable the pipe while not in idle link-training mode.
3526 * In this case there is requirement to wait for a minimum number of
3527 * idle patterns to be sent.
3532 if (wait_for((I915_READ(DP_TP_STATUS(port
)) & DP_TP_STATUS_IDLE_DONE
),
3534 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3537 /* Enable corresponding port and start training pattern 1 */
3539 intel_dp_start_link_train(struct intel_dp
*intel_dp
)
3541 struct drm_encoder
*encoder
= &dp_to_dig_port(intel_dp
)->base
.base
;
3542 struct drm_device
*dev
= encoder
->dev
;
3545 int voltage_tries
, loop_tries
;
3546 uint32_t DP
= intel_dp
->DP
;
3547 uint8_t link_config
[2];
3550 intel_ddi_prepare_link_retrain(encoder
);
3552 /* Write the link configuration data */
3553 link_config
[0] = intel_dp
->link_bw
;
3554 link_config
[1] = intel_dp
->lane_count
;
3555 if (drm_dp_enhanced_frame_cap(intel_dp
->dpcd
))
3556 link_config
[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN
;
3557 drm_dp_dpcd_write(&intel_dp
->aux
, DP_LINK_BW_SET
, link_config
, 2);
3558 if (intel_dp
->num_sink_rates
)
3559 drm_dp_dpcd_write(&intel_dp
->aux
, DP_LINK_RATE_SET
,
3560 &intel_dp
->rate_select
, 1);
3563 link_config
[1] = DP_SET_ANSI_8B10B
;
3564 drm_dp_dpcd_write(&intel_dp
->aux
, DP_DOWNSPREAD_CTRL
, link_config
, 2);
3568 /* clock recovery */
3569 if (!intel_dp_reset_link_train(intel_dp
, &DP
,
3570 DP_TRAINING_PATTERN_1
|
3571 DP_LINK_SCRAMBLING_DISABLE
)) {
3572 DRM_ERROR("failed to enable link training\n");
3580 uint8_t link_status
[DP_LINK_STATUS_SIZE
];
3582 drm_dp_link_train_clock_recovery_delay(intel_dp
->dpcd
);
3583 if (!intel_dp_get_link_status(intel_dp
, link_status
)) {
3584 DRM_ERROR("failed to get link status\n");
3588 if (drm_dp_clock_recovery_ok(link_status
, intel_dp
->lane_count
)) {
3589 DRM_DEBUG_KMS("clock recovery OK\n");
3593 /* Check to see if we've tried the max voltage */
3594 for (i
= 0; i
< intel_dp
->lane_count
; i
++)
3595 if ((intel_dp
->train_set
[i
] & DP_TRAIN_MAX_SWING_REACHED
) == 0)
3597 if (i
== intel_dp
->lane_count
) {
3599 if (loop_tries
== 5) {
3600 DRM_ERROR("too many full retries, give up\n");
3603 intel_dp_reset_link_train(intel_dp
, &DP
,
3604 DP_TRAINING_PATTERN_1
|
3605 DP_LINK_SCRAMBLING_DISABLE
);
3610 /* Check to see if we've tried the same voltage 5 times */
3611 if ((intel_dp
->train_set
[0] & DP_TRAIN_VOLTAGE_SWING_MASK
) == voltage
) {
3613 if (voltage_tries
== 5) {
3614 DRM_ERROR("too many voltage retries, give up\n");
3619 voltage
= intel_dp
->train_set
[0] & DP_TRAIN_VOLTAGE_SWING_MASK
;
3621 /* Update training set as requested by target */
3622 if (!intel_dp_update_link_train(intel_dp
, &DP
, link_status
)) {
3623 DRM_ERROR("failed to update link training\n");
3632 intel_dp_complete_link_train(struct intel_dp
*intel_dp
)
3634 bool channel_eq
= false;
3635 int tries
, cr_tries
;
3636 uint32_t DP
= intel_dp
->DP
;
3637 uint32_t training_pattern
= DP_TRAINING_PATTERN_2
;
3639 /* Training Pattern 3 for HBR2 ot 1.2 devices that support it*/
3640 if (intel_dp
->link_bw
== DP_LINK_BW_5_4
|| intel_dp
->use_tps3
)
3641 training_pattern
= DP_TRAINING_PATTERN_3
;
3643 /* channel equalization */
3644 if (!intel_dp_set_link_train(intel_dp
, &DP
,
3646 DP_LINK_SCRAMBLING_DISABLE
)) {
3647 DRM_ERROR("failed to start channel equalization\n");
3655 uint8_t link_status
[DP_LINK_STATUS_SIZE
];
3658 DRM_ERROR("failed to train DP, aborting\n");
3662 drm_dp_link_train_channel_eq_delay(intel_dp
->dpcd
);
3663 if (!intel_dp_get_link_status(intel_dp
, link_status
)) {
3664 DRM_ERROR("failed to get link status\n");
3668 /* Make sure clock is still ok */
3669 if (!drm_dp_clock_recovery_ok(link_status
, intel_dp
->lane_count
)) {
3670 intel_dp_start_link_train(intel_dp
);
3671 intel_dp_set_link_train(intel_dp
, &DP
,
3673 DP_LINK_SCRAMBLING_DISABLE
);
3678 if (drm_dp_channel_eq_ok(link_status
, intel_dp
->lane_count
)) {
3683 /* Try 5 times, then try clock recovery if that fails */
3685 intel_dp_start_link_train(intel_dp
);
3686 intel_dp_set_link_train(intel_dp
, &DP
,
3688 DP_LINK_SCRAMBLING_DISABLE
);
3694 /* Update training set as requested by target */
3695 if (!intel_dp_update_link_train(intel_dp
, &DP
, link_status
)) {
3696 DRM_ERROR("failed to update link training\n");
3702 intel_dp_set_idle_link_train(intel_dp
);
3707 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
3711 void intel_dp_stop_link_train(struct intel_dp
*intel_dp
)
3713 intel_dp_set_link_train(intel_dp
, &intel_dp
->DP
,
3714 DP_TRAINING_PATTERN_DISABLE
);
3718 intel_dp_link_down(struct intel_dp
*intel_dp
)
3720 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
3721 enum port port
= intel_dig_port
->port
;
3722 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
3723 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3724 uint32_t DP
= intel_dp
->DP
;
3726 if (WARN_ON(HAS_DDI(dev
)))
3729 if (WARN_ON((I915_READ(intel_dp
->output_reg
) & DP_PORT_EN
) == 0))
3732 DRM_DEBUG_KMS("\n");
3734 if (HAS_PCH_CPT(dev
) && (IS_GEN7(dev
) || port
!= PORT_A
)) {
3735 DP
&= ~DP_LINK_TRAIN_MASK_CPT
;
3736 I915_WRITE(intel_dp
->output_reg
, DP
| DP_LINK_TRAIN_PAT_IDLE_CPT
);
3738 if (IS_CHERRYVIEW(dev
))
3739 DP
&= ~DP_LINK_TRAIN_MASK_CHV
;
3741 DP
&= ~DP_LINK_TRAIN_MASK
;
3742 I915_WRITE(intel_dp
->output_reg
, DP
| DP_LINK_TRAIN_PAT_IDLE
);
3744 POSTING_READ(intel_dp
->output_reg
);
3746 if (HAS_PCH_IBX(dev
) &&
3747 I915_READ(intel_dp
->output_reg
) & DP_PIPEB_SELECT
) {
3748 /* Hardware workaround: leaving our transcoder select
3749 * set to transcoder B while it's off will prevent the
3750 * corresponding HDMI output on transcoder A.
3752 * Combine this with another hardware workaround:
3753 * transcoder select bit can only be cleared while the
3756 DP
&= ~DP_PIPEB_SELECT
;
3757 I915_WRITE(intel_dp
->output_reg
, DP
);
3758 POSTING_READ(intel_dp
->output_reg
);
3761 DP
&= ~DP_AUDIO_OUTPUT_ENABLE
;
3762 I915_WRITE(intel_dp
->output_reg
, DP
& ~DP_PORT_EN
);
3763 POSTING_READ(intel_dp
->output_reg
);
3764 msleep(intel_dp
->panel_power_down_delay
);
3768 intel_dp_get_dpcd(struct intel_dp
*intel_dp
)
3770 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
3771 struct drm_device
*dev
= dig_port
->base
.base
.dev
;
3772 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3775 if (intel_dp_dpcd_read_wake(&intel_dp
->aux
, 0x000, intel_dp
->dpcd
,
3776 sizeof(intel_dp
->dpcd
)) < 0)
3777 return false; /* aux transfer failed */
3779 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp
->dpcd
), intel_dp
->dpcd
);
3781 if (intel_dp
->dpcd
[DP_DPCD_REV
] == 0)
3782 return false; /* DPCD not present */
3784 /* Check if the panel supports PSR */
3785 memset(intel_dp
->psr_dpcd
, 0, sizeof(intel_dp
->psr_dpcd
));
3786 if (is_edp(intel_dp
)) {
3787 intel_dp_dpcd_read_wake(&intel_dp
->aux
, DP_PSR_SUPPORT
,
3789 sizeof(intel_dp
->psr_dpcd
));
3790 if (intel_dp
->psr_dpcd
[0] & DP_PSR_IS_SUPPORTED
) {
3791 dev_priv
->psr
.sink_support
= true;
3792 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
3796 /* Training Pattern 3 support, Intel platforms that support HBR2 alone
3797 * have support for TP3 hence that check is used along with dpcd check
3798 * to ensure TP3 can be enabled.
3799 * SKL < B0: due it's WaDisableHBR2 is the only exception where TP3 is
3800 * supported but still not enabled.
3802 if (intel_dp
->dpcd
[DP_DPCD_REV
] >= 0x12 &&
3803 intel_dp
->dpcd
[DP_MAX_LANE_COUNT
] & DP_TPS3_SUPPORTED
&&
3804 intel_dp_source_supports_hbr2(dev
)) {
3805 intel_dp
->use_tps3
= true;
3806 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
3808 intel_dp
->use_tps3
= false;
3810 /* Intermediate frequency support */
3811 if (is_edp(intel_dp
) &&
3812 (intel_dp
->dpcd
[DP_EDP_CONFIGURATION_CAP
] & DP_DPCD_DISPLAY_CONTROL_CAPABLE
) &&
3813 (intel_dp_dpcd_read_wake(&intel_dp
->aux
, DP_EDP_DPCD_REV
, &rev
, 1) == 1) &&
3814 (rev
>= 0x03)) { /* eDp v1.4 or higher */
3815 __le16 sink_rates
[DP_MAX_SUPPORTED_RATES
];
3818 intel_dp_dpcd_read_wake(&intel_dp
->aux
,
3819 DP_SUPPORTED_LINK_RATES
,
3821 sizeof(sink_rates
));
3823 for (i
= 0; i
< ARRAY_SIZE(sink_rates
); i
++) {
3824 int val
= le16_to_cpu(sink_rates
[i
]);
3829 /* Value read is in kHz while drm clock is saved in deca-kHz */
3830 intel_dp
->sink_rates
[i
] = (val
* 200) / 10;
3832 intel_dp
->num_sink_rates
= i
;
3835 intel_dp_print_rates(intel_dp
);
3837 if (!(intel_dp
->dpcd
[DP_DOWNSTREAMPORT_PRESENT
] &
3838 DP_DWN_STRM_PORT_PRESENT
))
3839 return true; /* native DP sink */
3841 if (intel_dp
->dpcd
[DP_DPCD_REV
] == 0x10)
3842 return true; /* no per-port downstream info */
3844 if (intel_dp_dpcd_read_wake(&intel_dp
->aux
, DP_DOWNSTREAM_PORT_0
,
3845 intel_dp
->downstream_ports
,
3846 DP_MAX_DOWNSTREAM_PORTS
) < 0)
3847 return false; /* downstream port status fetch failed */
3853 intel_dp_probe_oui(struct intel_dp
*intel_dp
)
3857 if (!(intel_dp
->dpcd
[DP_DOWN_STREAM_PORT_COUNT
] & DP_OUI_SUPPORT
))
3860 if (intel_dp_dpcd_read_wake(&intel_dp
->aux
, DP_SINK_OUI
, buf
, 3) == 3)
3861 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3862 buf
[0], buf
[1], buf
[2]);
3864 if (intel_dp_dpcd_read_wake(&intel_dp
->aux
, DP_BRANCH_OUI
, buf
, 3) == 3)
3865 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3866 buf
[0], buf
[1], buf
[2]);
3870 intel_dp_probe_mst(struct intel_dp
*intel_dp
)
3874 if (!intel_dp
->can_mst
)
3877 if (intel_dp
->dpcd
[DP_DPCD_REV
] < 0x12)
3880 if (intel_dp_dpcd_read_wake(&intel_dp
->aux
, DP_MSTM_CAP
, buf
, 1)) {
3881 if (buf
[0] & DP_MST_CAP
) {
3882 DRM_DEBUG_KMS("Sink is MST capable\n");
3883 intel_dp
->is_mst
= true;
3885 DRM_DEBUG_KMS("Sink is not MST capable\n");
3886 intel_dp
->is_mst
= false;
3890 drm_dp_mst_topology_mgr_set_mst(&intel_dp
->mst_mgr
, intel_dp
->is_mst
);
3891 return intel_dp
->is_mst
;
3894 int intel_dp_sink_crc(struct intel_dp
*intel_dp
, u8
*crc
)
3896 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
3897 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
3898 struct intel_crtc
*intel_crtc
=
3899 to_intel_crtc(intel_dig_port
->base
.base
.crtc
);
3904 if (drm_dp_dpcd_readb(&intel_dp
->aux
, DP_TEST_SINK_MISC
, &buf
) < 0)
3907 if (!(buf
& DP_TEST_CRC_SUPPORTED
))
3910 if (drm_dp_dpcd_readb(&intel_dp
->aux
, DP_TEST_SINK
, &buf
) < 0)
3913 if (drm_dp_dpcd_writeb(&intel_dp
->aux
, DP_TEST_SINK
,
3914 buf
| DP_TEST_SINK_START
) < 0)
3917 if (drm_dp_dpcd_readb(&intel_dp
->aux
, DP_TEST_SINK_MISC
, &buf
) < 0)
3919 test_crc_count
= buf
& DP_TEST_COUNT_MASK
;
3922 if (drm_dp_dpcd_readb(&intel_dp
->aux
,
3923 DP_TEST_SINK_MISC
, &buf
) < 0)
3925 intel_wait_for_vblank(dev
, intel_crtc
->pipe
);
3926 } while (--attempts
&& (buf
& DP_TEST_COUNT_MASK
) == test_crc_count
);
3928 if (attempts
== 0) {
3929 DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
3933 if (drm_dp_dpcd_read(&intel_dp
->aux
, DP_TEST_CRC_R_CR
, crc
, 6) < 0)
3936 if (drm_dp_dpcd_readb(&intel_dp
->aux
, DP_TEST_SINK
, &buf
) < 0)
3938 if (drm_dp_dpcd_writeb(&intel_dp
->aux
, DP_TEST_SINK
,
3939 buf
& ~DP_TEST_SINK_START
) < 0)
3946 intel_dp_get_sink_irq(struct intel_dp
*intel_dp
, u8
*sink_irq_vector
)
3948 return intel_dp_dpcd_read_wake(&intel_dp
->aux
,
3949 DP_DEVICE_SERVICE_IRQ_VECTOR
,
3950 sink_irq_vector
, 1) == 1;
3954 intel_dp_get_sink_irq_esi(struct intel_dp
*intel_dp
, u8
*sink_irq_vector
)
3958 ret
= intel_dp_dpcd_read_wake(&intel_dp
->aux
,
3960 sink_irq_vector
, 14);
3968 intel_dp_handle_test_request(struct intel_dp
*intel_dp
)
3970 /* NAK by default */
3971 drm_dp_dpcd_writeb(&intel_dp
->aux
, DP_TEST_RESPONSE
, DP_TEST_NAK
);
3975 intel_dp_check_mst_status(struct intel_dp
*intel_dp
)
3979 if (intel_dp
->is_mst
) {
3984 bret
= intel_dp_get_sink_irq_esi(intel_dp
, esi
);
3988 /* check link status - esi[10] = 0x200c */
3989 if (intel_dp
->active_mst_links
&& !drm_dp_channel_eq_ok(&esi
[10], intel_dp
->lane_count
)) {
3990 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
3991 intel_dp_start_link_train(intel_dp
);
3992 intel_dp_complete_link_train(intel_dp
);
3993 intel_dp_stop_link_train(intel_dp
);
3996 DRM_DEBUG_KMS("got esi %3ph\n", esi
);
3997 ret
= drm_dp_mst_hpd_irq(&intel_dp
->mst_mgr
, esi
, &handled
);
4000 for (retry
= 0; retry
< 3; retry
++) {
4002 wret
= drm_dp_dpcd_write(&intel_dp
->aux
,
4003 DP_SINK_COUNT_ESI
+1,
4010 bret
= intel_dp_get_sink_irq_esi(intel_dp
, esi
);
4012 DRM_DEBUG_KMS("got esi2 %3ph\n", esi
);
4020 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
4021 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4022 intel_dp
->is_mst
= false;
4023 drm_dp_mst_topology_mgr_set_mst(&intel_dp
->mst_mgr
, intel_dp
->is_mst
);
4024 /* send a hotplug event */
4025 drm_kms_helper_hotplug_event(intel_dig_port
->base
.base
.dev
);
4032 * According to DP spec
4035 * 2. Configure link according to Receiver Capabilities
4036 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4037 * 4. Check link status on receipt of hot-plug interrupt
4040 intel_dp_check_link_status(struct intel_dp
*intel_dp
)
4042 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
4043 struct intel_encoder
*intel_encoder
= &dp_to_dig_port(intel_dp
)->base
;
4045 u8 link_status
[DP_LINK_STATUS_SIZE
];
4047 WARN_ON(!drm_modeset_is_locked(&dev
->mode_config
.connection_mutex
));
4049 if (!intel_encoder
->connectors_active
)
4052 if (WARN_ON(!intel_encoder
->base
.crtc
))
4055 if (!to_intel_crtc(intel_encoder
->base
.crtc
)->active
)
4058 /* Try to read receiver status if the link appears to be up */
4059 if (!intel_dp_get_link_status(intel_dp
, link_status
)) {
4063 /* Now read the DPCD to see if it's actually running */
4064 if (!intel_dp_get_dpcd(intel_dp
)) {
4068 /* Try to read the source of the interrupt */
4069 if (intel_dp
->dpcd
[DP_DPCD_REV
] >= 0x11 &&
4070 intel_dp_get_sink_irq(intel_dp
, &sink_irq_vector
)) {
4071 /* Clear interrupt source */
4072 drm_dp_dpcd_writeb(&intel_dp
->aux
,
4073 DP_DEVICE_SERVICE_IRQ_VECTOR
,
4076 if (sink_irq_vector
& DP_AUTOMATED_TEST_REQUEST
)
4077 intel_dp_handle_test_request(intel_dp
);
4078 if (sink_irq_vector
& (DP_CP_IRQ
| DP_SINK_SPECIFIC_IRQ
))
4079 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4082 if (!drm_dp_channel_eq_ok(link_status
, intel_dp
->lane_count
)) {
4083 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4084 intel_encoder
->base
.name
);
4085 intel_dp_start_link_train(intel_dp
);
4086 intel_dp_complete_link_train(intel_dp
);
4087 intel_dp_stop_link_train(intel_dp
);
4091 /* XXX this is probably wrong for multiple downstream ports */
4092 static enum drm_connector_status
4093 intel_dp_detect_dpcd(struct intel_dp
*intel_dp
)
4095 uint8_t *dpcd
= intel_dp
->dpcd
;
4098 if (!intel_dp_get_dpcd(intel_dp
))
4099 return connector_status_disconnected
;
4101 /* if there's no downstream port, we're done */
4102 if (!(dpcd
[DP_DOWNSTREAMPORT_PRESENT
] & DP_DWN_STRM_PORT_PRESENT
))
4103 return connector_status_connected
;
4105 /* If we're HPD-aware, SINK_COUNT changes dynamically */
4106 if (intel_dp
->dpcd
[DP_DPCD_REV
] >= 0x11 &&
4107 intel_dp
->downstream_ports
[0] & DP_DS_PORT_HPD
) {
4110 if (intel_dp_dpcd_read_wake(&intel_dp
->aux
, DP_SINK_COUNT
,
4112 return connector_status_unknown
;
4114 return DP_GET_SINK_COUNT(reg
) ? connector_status_connected
4115 : connector_status_disconnected
;
4118 /* If no HPD, poke DDC gently */
4119 if (drm_probe_ddc(&intel_dp
->aux
.ddc
))
4120 return connector_status_connected
;
4122 /* Well we tried, say unknown for unreliable port types */
4123 if (intel_dp
->dpcd
[DP_DPCD_REV
] >= 0x11) {
4124 type
= intel_dp
->downstream_ports
[0] & DP_DS_PORT_TYPE_MASK
;
4125 if (type
== DP_DS_PORT_TYPE_VGA
||
4126 type
== DP_DS_PORT_TYPE_NON_EDID
)
4127 return connector_status_unknown
;
4129 type
= intel_dp
->dpcd
[DP_DOWNSTREAMPORT_PRESENT
] &
4130 DP_DWN_STRM_PORT_TYPE_MASK
;
4131 if (type
== DP_DWN_STRM_PORT_TYPE_ANALOG
||
4132 type
== DP_DWN_STRM_PORT_TYPE_OTHER
)
4133 return connector_status_unknown
;
4136 /* Anything else is out of spec, warn and ignore */
4137 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4138 return connector_status_disconnected
;
4141 static enum drm_connector_status
4142 edp_detect(struct intel_dp
*intel_dp
)
4144 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
4145 enum drm_connector_status status
;
4147 status
= intel_panel_detect(dev
);
4148 if (status
== connector_status_unknown
)
4149 status
= connector_status_connected
;
4154 static enum drm_connector_status
4155 ironlake_dp_detect(struct intel_dp
*intel_dp
)
4157 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
4158 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
4159 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
4161 if (!ibx_digital_port_connected(dev_priv
, intel_dig_port
))
4162 return connector_status_disconnected
;
4164 return intel_dp_detect_dpcd(intel_dp
);
4167 static int g4x_digital_port_connected(struct drm_device
*dev
,
4168 struct intel_digital_port
*intel_dig_port
)
4170 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
4173 if (IS_VALLEYVIEW(dev
)) {
4174 switch (intel_dig_port
->port
) {
4176 bit
= PORTB_HOTPLUG_LIVE_STATUS_VLV
;
4179 bit
= PORTC_HOTPLUG_LIVE_STATUS_VLV
;
4182 bit
= PORTD_HOTPLUG_LIVE_STATUS_VLV
;
4188 switch (intel_dig_port
->port
) {
4190 bit
= PORTB_HOTPLUG_LIVE_STATUS_G4X
;
4193 bit
= PORTC_HOTPLUG_LIVE_STATUS_G4X
;
4196 bit
= PORTD_HOTPLUG_LIVE_STATUS_G4X
;
4203 if ((I915_READ(PORT_HOTPLUG_STAT
) & bit
) == 0)
4208 static enum drm_connector_status
4209 g4x_dp_detect(struct intel_dp
*intel_dp
)
4211 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
4212 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
4215 /* Can't disconnect eDP, but you can close the lid... */
4216 if (is_edp(intel_dp
)) {
4217 enum drm_connector_status status
;
4219 status
= intel_panel_detect(dev
);
4220 if (status
== connector_status_unknown
)
4221 status
= connector_status_connected
;
4225 ret
= g4x_digital_port_connected(dev
, intel_dig_port
);
4227 return connector_status_unknown
;
4229 return connector_status_disconnected
;
4231 return intel_dp_detect_dpcd(intel_dp
);
4234 static struct edid
*
4235 intel_dp_get_edid(struct intel_dp
*intel_dp
)
4237 struct intel_connector
*intel_connector
= intel_dp
->attached_connector
;
4239 /* use cached edid if we have one */
4240 if (intel_connector
->edid
) {
4242 if (IS_ERR(intel_connector
->edid
))
4245 return drm_edid_duplicate(intel_connector
->edid
);
4247 return drm_get_edid(&intel_connector
->base
,
4248 &intel_dp
->aux
.ddc
);
4252 intel_dp_set_edid(struct intel_dp
*intel_dp
)
4254 struct intel_connector
*intel_connector
= intel_dp
->attached_connector
;
4257 edid
= intel_dp_get_edid(intel_dp
);
4258 intel_connector
->detect_edid
= edid
;
4260 if (intel_dp
->force_audio
!= HDMI_AUDIO_AUTO
)
4261 intel_dp
->has_audio
= intel_dp
->force_audio
== HDMI_AUDIO_ON
;
4263 intel_dp
->has_audio
= drm_detect_monitor_audio(edid
);
4267 intel_dp_unset_edid(struct intel_dp
*intel_dp
)
4269 struct intel_connector
*intel_connector
= intel_dp
->attached_connector
;
4271 kfree(intel_connector
->detect_edid
);
4272 intel_connector
->detect_edid
= NULL
;
4274 intel_dp
->has_audio
= false;
4277 static enum intel_display_power_domain
4278 intel_dp_power_get(struct intel_dp
*dp
)
4280 struct intel_encoder
*encoder
= &dp_to_dig_port(dp
)->base
;
4281 enum intel_display_power_domain power_domain
;
4283 power_domain
= intel_display_port_power_domain(encoder
);
4284 intel_display_power_get(to_i915(encoder
->base
.dev
), power_domain
);
4286 return power_domain
;
4290 intel_dp_power_put(struct intel_dp
*dp
,
4291 enum intel_display_power_domain power_domain
)
4293 struct intel_encoder
*encoder
= &dp_to_dig_port(dp
)->base
;
4294 intel_display_power_put(to_i915(encoder
->base
.dev
), power_domain
);
4297 static enum drm_connector_status
4298 intel_dp_detect(struct drm_connector
*connector
, bool force
)
4300 struct intel_dp
*intel_dp
= intel_attached_dp(connector
);
4301 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
4302 struct intel_encoder
*intel_encoder
= &intel_dig_port
->base
;
4303 struct drm_device
*dev
= connector
->dev
;
4304 enum drm_connector_status status
;
4305 enum intel_display_power_domain power_domain
;
4308 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4309 connector
->base
.id
, connector
->name
);
4310 intel_dp_unset_edid(intel_dp
);
4312 if (intel_dp
->is_mst
) {
4313 /* MST devices are disconnected from a monitor POV */
4314 if (intel_encoder
->type
!= INTEL_OUTPUT_EDP
)
4315 intel_encoder
->type
= INTEL_OUTPUT_DISPLAYPORT
;
4316 return connector_status_disconnected
;
4319 power_domain
= intel_dp_power_get(intel_dp
);
4321 /* Can't disconnect eDP, but you can close the lid... */
4322 if (is_edp(intel_dp
))
4323 status
= edp_detect(intel_dp
);
4324 else if (HAS_PCH_SPLIT(dev
))
4325 status
= ironlake_dp_detect(intel_dp
);
4327 status
= g4x_dp_detect(intel_dp
);
4328 if (status
!= connector_status_connected
)
4331 intel_dp_probe_oui(intel_dp
);
4333 ret
= intel_dp_probe_mst(intel_dp
);
4335 /* if we are in MST mode then this connector
4336 won't appear connected or have anything with EDID on it */
4337 if (intel_encoder
->type
!= INTEL_OUTPUT_EDP
)
4338 intel_encoder
->type
= INTEL_OUTPUT_DISPLAYPORT
;
4339 status
= connector_status_disconnected
;
4343 intel_dp_set_edid(intel_dp
);
4345 if (intel_encoder
->type
!= INTEL_OUTPUT_EDP
)
4346 intel_encoder
->type
= INTEL_OUTPUT_DISPLAYPORT
;
4347 status
= connector_status_connected
;
4350 intel_dp_power_put(intel_dp
, power_domain
);
4355 intel_dp_force(struct drm_connector
*connector
)
4357 struct intel_dp
*intel_dp
= intel_attached_dp(connector
);
4358 struct intel_encoder
*intel_encoder
= &dp_to_dig_port(intel_dp
)->base
;
4359 enum intel_display_power_domain power_domain
;
4361 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4362 connector
->base
.id
, connector
->name
);
4363 intel_dp_unset_edid(intel_dp
);
4365 if (connector
->status
!= connector_status_connected
)
4368 power_domain
= intel_dp_power_get(intel_dp
);
4370 intel_dp_set_edid(intel_dp
);
4372 intel_dp_power_put(intel_dp
, power_domain
);
4374 if (intel_encoder
->type
!= INTEL_OUTPUT_EDP
)
4375 intel_encoder
->type
= INTEL_OUTPUT_DISPLAYPORT
;
4378 static int intel_dp_get_modes(struct drm_connector
*connector
)
4380 struct intel_connector
*intel_connector
= to_intel_connector(connector
);
4383 edid
= intel_connector
->detect_edid
;
4385 int ret
= intel_connector_update_modes(connector
, edid
);
4390 /* if eDP has no EDID, fall back to fixed mode */
4391 if (is_edp(intel_attached_dp(connector
)) &&
4392 intel_connector
->panel
.fixed_mode
) {
4393 struct drm_display_mode
*mode
;
4395 mode
= drm_mode_duplicate(connector
->dev
,
4396 intel_connector
->panel
.fixed_mode
);
4398 drm_mode_probed_add(connector
, mode
);
4407 intel_dp_detect_audio(struct drm_connector
*connector
)
4409 bool has_audio
= false;
4412 edid
= to_intel_connector(connector
)->detect_edid
;
4414 has_audio
= drm_detect_monitor_audio(edid
);
4420 intel_dp_set_property(struct drm_connector
*connector
,
4421 struct drm_property
*property
,
4424 struct drm_i915_private
*dev_priv
= connector
->dev
->dev_private
;
4425 struct intel_connector
*intel_connector
= to_intel_connector(connector
);
4426 struct intel_encoder
*intel_encoder
= intel_attached_encoder(connector
);
4427 struct intel_dp
*intel_dp
= enc_to_intel_dp(&intel_encoder
->base
);
4430 ret
= drm_object_property_set_value(&connector
->base
, property
, val
);
4434 if (property
== dev_priv
->force_audio_property
) {
4438 if (i
== intel_dp
->force_audio
)
4441 intel_dp
->force_audio
= i
;
4443 if (i
== HDMI_AUDIO_AUTO
)
4444 has_audio
= intel_dp_detect_audio(connector
);
4446 has_audio
= (i
== HDMI_AUDIO_ON
);
4448 if (has_audio
== intel_dp
->has_audio
)
4451 intel_dp
->has_audio
= has_audio
;
4455 if (property
== dev_priv
->broadcast_rgb_property
) {
4456 bool old_auto
= intel_dp
->color_range_auto
;
4457 uint32_t old_range
= intel_dp
->color_range
;
4460 case INTEL_BROADCAST_RGB_AUTO
:
4461 intel_dp
->color_range_auto
= true;
4463 case INTEL_BROADCAST_RGB_FULL
:
4464 intel_dp
->color_range_auto
= false;
4465 intel_dp
->color_range
= 0;
4467 case INTEL_BROADCAST_RGB_LIMITED
:
4468 intel_dp
->color_range_auto
= false;
4469 intel_dp
->color_range
= DP_COLOR_RANGE_16_235
;
4475 if (old_auto
== intel_dp
->color_range_auto
&&
4476 old_range
== intel_dp
->color_range
)
4482 if (is_edp(intel_dp
) &&
4483 property
== connector
->dev
->mode_config
.scaling_mode_property
) {
4484 if (val
== DRM_MODE_SCALE_NONE
) {
4485 DRM_DEBUG_KMS("no scaling not supported\n");
4489 if (intel_connector
->panel
.fitting_mode
== val
) {
4490 /* the eDP scaling property is not changed */
4493 intel_connector
->panel
.fitting_mode
= val
;
4501 if (intel_encoder
->base
.crtc
)
4502 intel_crtc_restore_mode(intel_encoder
->base
.crtc
);
4508 intel_dp_connector_destroy(struct drm_connector
*connector
)
4510 struct intel_connector
*intel_connector
= to_intel_connector(connector
);
4512 kfree(intel_connector
->detect_edid
);
4514 if (!IS_ERR_OR_NULL(intel_connector
->edid
))
4515 kfree(intel_connector
->edid
);
4517 /* Can't call is_edp() since the encoder may have been destroyed
4519 if (connector
->connector_type
== DRM_MODE_CONNECTOR_eDP
)
4520 intel_panel_fini(&intel_connector
->panel
);
4522 drm_connector_cleanup(connector
);
4526 void intel_dp_encoder_destroy(struct drm_encoder
*encoder
)
4528 struct intel_digital_port
*intel_dig_port
= enc_to_dig_port(encoder
);
4529 struct intel_dp
*intel_dp
= &intel_dig_port
->dp
;
4531 drm_dp_aux_unregister(&intel_dp
->aux
);
4532 intel_dp_mst_encoder_cleanup(intel_dig_port
);
4533 if (is_edp(intel_dp
)) {
4534 cancel_delayed_work_sync(&intel_dp
->panel_vdd_work
);
4536 * vdd might still be enabled do to the delayed vdd off.
4537 * Make sure vdd is actually turned off here.
4540 edp_panel_vdd_off_sync(intel_dp
);
4541 pps_unlock(intel_dp
);
4543 if (intel_dp
->edp_notifier
.notifier_call
) {
4544 unregister_reboot_notifier(&intel_dp
->edp_notifier
);
4545 intel_dp
->edp_notifier
.notifier_call
= NULL
;
4548 drm_encoder_cleanup(encoder
);
4549 kfree(intel_dig_port
);
4552 static void intel_dp_encoder_suspend(struct intel_encoder
*intel_encoder
)
4554 struct intel_dp
*intel_dp
= enc_to_intel_dp(&intel_encoder
->base
);
4556 if (!is_edp(intel_dp
))
4560 * vdd might still be enabled do to the delayed vdd off.
4561 * Make sure vdd is actually turned off here.
4563 cancel_delayed_work_sync(&intel_dp
->panel_vdd_work
);
4565 edp_panel_vdd_off_sync(intel_dp
);
4566 pps_unlock(intel_dp
);
4569 static void intel_edp_panel_vdd_sanitize(struct intel_dp
*intel_dp
)
4571 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
4572 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
4573 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
4574 enum intel_display_power_domain power_domain
;
4576 lockdep_assert_held(&dev_priv
->pps_mutex
);
4578 if (!edp_have_panel_vdd(intel_dp
))
4582 * The VDD bit needs a power domain reference, so if the bit is
4583 * already enabled when we boot or resume, grab this reference and
4584 * schedule a vdd off, so we don't hold on to the reference
4587 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4588 power_domain
= intel_display_port_power_domain(&intel_dig_port
->base
);
4589 intel_display_power_get(dev_priv
, power_domain
);
4591 edp_panel_vdd_schedule_off(intel_dp
);
4594 static void intel_dp_encoder_reset(struct drm_encoder
*encoder
)
4596 struct intel_dp
*intel_dp
;
4598 if (to_intel_encoder(encoder
)->type
!= INTEL_OUTPUT_EDP
)
4601 intel_dp
= enc_to_intel_dp(encoder
);
4606 * Read out the current power sequencer assignment,
4607 * in case the BIOS did something with it.
4609 if (IS_VALLEYVIEW(encoder
->dev
))
4610 vlv_initial_power_sequencer_setup(intel_dp
);
4612 intel_edp_panel_vdd_sanitize(intel_dp
);
4614 pps_unlock(intel_dp
);
4617 static const struct drm_connector_funcs intel_dp_connector_funcs
= {
4618 .dpms
= intel_connector_dpms
,
4619 .detect
= intel_dp_detect
,
4620 .force
= intel_dp_force
,
4621 .fill_modes
= drm_helper_probe_single_connector_modes
,
4622 .set_property
= intel_dp_set_property
,
4623 .atomic_get_property
= intel_connector_atomic_get_property
,
4624 .destroy
= intel_dp_connector_destroy
,
4625 .atomic_destroy_state
= drm_atomic_helper_connector_destroy_state
,
4626 .atomic_duplicate_state
= drm_atomic_helper_connector_duplicate_state
,
4629 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs
= {
4630 .get_modes
= intel_dp_get_modes
,
4631 .mode_valid
= intel_dp_mode_valid
,
4632 .best_encoder
= intel_best_encoder
,
4635 static const struct drm_encoder_funcs intel_dp_enc_funcs
= {
4636 .reset
= intel_dp_encoder_reset
,
4637 .destroy
= intel_dp_encoder_destroy
,
/*
 * Legacy hotplug hook.  Intentionally a no-op: DP hotplug is handled via the
 * hpd_pulse path (intel_dp_hpd_pulse) on these platforms.
 * NOTE(review): body lines were dropped from the garbled listing; this stub
 * matches the kernel version the surrounding code corresponds to — confirm
 * against the original file.
 */
static void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
	return;
}
4647 intel_dp_hpd_pulse(struct intel_digital_port
*intel_dig_port
, bool long_hpd
)
4649 struct intel_dp
*intel_dp
= &intel_dig_port
->dp
;
4650 struct intel_encoder
*intel_encoder
= &intel_dig_port
->base
;
4651 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
4652 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
4653 enum intel_display_power_domain power_domain
;
4654 enum irqreturn ret
= IRQ_NONE
;
4656 if (intel_dig_port
->base
.type
!= INTEL_OUTPUT_EDP
)
4657 intel_dig_port
->base
.type
= INTEL_OUTPUT_DISPLAYPORT
;
4659 if (long_hpd
&& intel_dig_port
->base
.type
== INTEL_OUTPUT_EDP
) {
4661 * vdd off can generate a long pulse on eDP which
4662 * would require vdd on to handle it, and thus we
4663 * would end up in an endless cycle of
4664 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4666 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4667 port_name(intel_dig_port
->port
));
4671 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4672 port_name(intel_dig_port
->port
),
4673 long_hpd
? "long" : "short");
4675 power_domain
= intel_display_port_power_domain(intel_encoder
);
4676 intel_display_power_get(dev_priv
, power_domain
);
4680 if (HAS_PCH_SPLIT(dev
)) {
4681 if (!ibx_digital_port_connected(dev_priv
, intel_dig_port
))
4684 if (g4x_digital_port_connected(dev
, intel_dig_port
) != 1)
4688 if (!intel_dp_get_dpcd(intel_dp
)) {
4692 intel_dp_probe_oui(intel_dp
);
4694 if (!intel_dp_probe_mst(intel_dp
)) {
4695 drm_modeset_lock(&dev
->mode_config
.connection_mutex
, NULL
);
4696 intel_dp_check_link_status(intel_dp
);
4697 drm_modeset_unlock(&dev
->mode_config
.connection_mutex
);
4701 if (intel_dp
->is_mst
) {
4702 if (intel_dp_check_mst_status(intel_dp
) == -EINVAL
)
4706 if (!intel_dp
->is_mst
) {
4707 drm_modeset_lock(&dev
->mode_config
.connection_mutex
, NULL
);
4708 intel_dp_check_link_status(intel_dp
);
4709 drm_modeset_unlock(&dev
->mode_config
.connection_mutex
);
4717 /* if we were in MST mode, and device is not there get out of MST mode */
4718 if (intel_dp
->is_mst
) {
4719 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp
->is_mst
, intel_dp
->mst_mgr
.mst_state
);
4720 intel_dp
->is_mst
= false;
4721 drm_dp_mst_topology_mgr_set_mst(&intel_dp
->mst_mgr
, intel_dp
->is_mst
);
4724 intel_display_power_put(dev_priv
, power_domain
);
4729 /* Return which DP Port should be selected for Transcoder DP control */
4731 intel_trans_dp_port_sel(struct drm_crtc
*crtc
)
4733 struct drm_device
*dev
= crtc
->dev
;
4734 struct intel_encoder
*intel_encoder
;
4735 struct intel_dp
*intel_dp
;
4737 for_each_encoder_on_crtc(dev
, crtc
, intel_encoder
) {
4738 intel_dp
= enc_to_intel_dp(&intel_encoder
->base
);
4740 if (intel_encoder
->type
== INTEL_OUTPUT_DISPLAYPORT
||
4741 intel_encoder
->type
== INTEL_OUTPUT_EDP
)
4742 return intel_dp
->output_reg
;
4748 /* check the VBT to see whether the eDP is on DP-D port */
4749 bool intel_dp_is_edp(struct drm_device
*dev
, enum port port
)
4751 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
4752 union child_device_config
*p_child
;
4754 static const short port_mapping
[] = {
4755 [PORT_B
] = PORT_IDPB
,
4756 [PORT_C
] = PORT_IDPC
,
4757 [PORT_D
] = PORT_IDPD
,
4763 if (!dev_priv
->vbt
.child_dev_num
)
4766 for (i
= 0; i
< dev_priv
->vbt
.child_dev_num
; i
++) {
4767 p_child
= dev_priv
->vbt
.child_dev
+ i
;
4769 if (p_child
->common
.dvo_port
== port_mapping
[port
] &&
4770 (p_child
->common
.device_type
& DEVICE_TYPE_eDP_BITS
) ==
4771 (DEVICE_TYPE_eDP
& DEVICE_TYPE_eDP_BITS
))
4778 intel_dp_add_properties(struct intel_dp
*intel_dp
, struct drm_connector
*connector
)
4780 struct intel_connector
*intel_connector
= to_intel_connector(connector
);
4782 intel_attach_force_audio_property(connector
);
4783 intel_attach_broadcast_rgb_property(connector
);
4784 intel_dp
->color_range_auto
= true;
4786 if (is_edp(intel_dp
)) {
4787 drm_mode_create_scaling_mode_property(connector
->dev
);
4788 drm_object_attach_property(
4790 connector
->dev
->mode_config
.scaling_mode_property
,
4791 DRM_MODE_SCALE_ASPECT
);
4792 intel_connector
->panel
.fitting_mode
= DRM_MODE_SCALE_ASPECT
;
4796 static void intel_dp_init_panel_power_timestamps(struct intel_dp
*intel_dp
)
4798 intel_dp
->last_power_cycle
= jiffies
;
4799 intel_dp
->last_power_on
= jiffies
;
4800 intel_dp
->last_backlight_off
= jiffies
;
4804 intel_dp_init_panel_power_sequencer(struct drm_device
*dev
,
4805 struct intel_dp
*intel_dp
)
4807 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
4808 struct edp_power_seq cur
, vbt
, spec
,
4809 *final
= &intel_dp
->pps_delays
;
4810 u32 pp_on
, pp_off
, pp_div
, pp
;
4811 int pp_ctrl_reg
, pp_on_reg
, pp_off_reg
, pp_div_reg
;
4813 lockdep_assert_held(&dev_priv
->pps_mutex
);
4815 /* already initialized? */
4816 if (final
->t11_t12
!= 0)
4819 if (HAS_PCH_SPLIT(dev
)) {
4820 pp_ctrl_reg
= PCH_PP_CONTROL
;
4821 pp_on_reg
= PCH_PP_ON_DELAYS
;
4822 pp_off_reg
= PCH_PP_OFF_DELAYS
;
4823 pp_div_reg
= PCH_PP_DIVISOR
;
4825 enum pipe pipe
= vlv_power_sequencer_pipe(intel_dp
);
4827 pp_ctrl_reg
= VLV_PIPE_PP_CONTROL(pipe
);
4828 pp_on_reg
= VLV_PIPE_PP_ON_DELAYS(pipe
);
4829 pp_off_reg
= VLV_PIPE_PP_OFF_DELAYS(pipe
);
4830 pp_div_reg
= VLV_PIPE_PP_DIVISOR(pipe
);
4833 /* Workaround: Need to write PP_CONTROL with the unlock key as
4834 * the very first thing. */
4835 pp
= ironlake_get_pp_control(intel_dp
);
4836 I915_WRITE(pp_ctrl_reg
, pp
);
4838 pp_on
= I915_READ(pp_on_reg
);
4839 pp_off
= I915_READ(pp_off_reg
);
4840 pp_div
= I915_READ(pp_div_reg
);
4842 /* Pull timing values out of registers */
4843 cur
.t1_t3
= (pp_on
& PANEL_POWER_UP_DELAY_MASK
) >>
4844 PANEL_POWER_UP_DELAY_SHIFT
;
4846 cur
.t8
= (pp_on
& PANEL_LIGHT_ON_DELAY_MASK
) >>
4847 PANEL_LIGHT_ON_DELAY_SHIFT
;
4849 cur
.t9
= (pp_off
& PANEL_LIGHT_OFF_DELAY_MASK
) >>
4850 PANEL_LIGHT_OFF_DELAY_SHIFT
;
4852 cur
.t10
= (pp_off
& PANEL_POWER_DOWN_DELAY_MASK
) >>
4853 PANEL_POWER_DOWN_DELAY_SHIFT
;
4855 cur
.t11_t12
= ((pp_div
& PANEL_POWER_CYCLE_DELAY_MASK
) >>
4856 PANEL_POWER_CYCLE_DELAY_SHIFT
) * 1000;
4858 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4859 cur
.t1_t3
, cur
.t8
, cur
.t9
, cur
.t10
, cur
.t11_t12
);
4861 vbt
= dev_priv
->vbt
.edp_pps
;
4863 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
4864 * our hw here, which are all in 100usec. */
4865 spec
.t1_t3
= 210 * 10;
4866 spec
.t8
= 50 * 10; /* no limit for t8, use t7 instead */
4867 spec
.t9
= 50 * 10; /* no limit for t9, make it symmetric with t8 */
4868 spec
.t10
= 500 * 10;
4869 /* This one is special and actually in units of 100ms, but zero
4870 * based in the hw (so we need to add 100 ms). But the sw vbt
4871 * table multiplies it with 1000 to make it in units of 100usec,
4873 spec
.t11_t12
= (510 + 100) * 10;
4875 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4876 vbt
.t1_t3
, vbt
.t8
, vbt
.t9
, vbt
.t10
, vbt
.t11_t12
);
4878 /* Use the max of the register settings and vbt. If both are
4879 * unset, fall back to the spec limits. */
4880 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
4882 max(cur.field, vbt.field))
4883 assign_final(t1_t3
);
4887 assign_final(t11_t12
);
4890 #define get_delay(field) (DIV_ROUND_UP(final->field, 10))
4891 intel_dp
->panel_power_up_delay
= get_delay(t1_t3
);
4892 intel_dp
->backlight_on_delay
= get_delay(t8
);
4893 intel_dp
->backlight_off_delay
= get_delay(t9
);
4894 intel_dp
->panel_power_down_delay
= get_delay(t10
);
4895 intel_dp
->panel_power_cycle_delay
= get_delay(t11_t12
);
4898 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
4899 intel_dp
->panel_power_up_delay
, intel_dp
->panel_power_down_delay
,
4900 intel_dp
->panel_power_cycle_delay
);
4902 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
4903 intel_dp
->backlight_on_delay
, intel_dp
->backlight_off_delay
);
4907 intel_dp_init_panel_power_sequencer_registers(struct drm_device
*dev
,
4908 struct intel_dp
*intel_dp
)
4910 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
4911 u32 pp_on
, pp_off
, pp_div
, port_sel
= 0;
4912 int div
= HAS_PCH_SPLIT(dev
) ? intel_pch_rawclk(dev
) : intel_hrawclk(dev
);
4913 int pp_on_reg
, pp_off_reg
, pp_div_reg
;
4914 enum port port
= dp_to_dig_port(intel_dp
)->port
;
4915 const struct edp_power_seq
*seq
= &intel_dp
->pps_delays
;
4917 lockdep_assert_held(&dev_priv
->pps_mutex
);
4919 if (HAS_PCH_SPLIT(dev
)) {
4920 pp_on_reg
= PCH_PP_ON_DELAYS
;
4921 pp_off_reg
= PCH_PP_OFF_DELAYS
;
4922 pp_div_reg
= PCH_PP_DIVISOR
;
4924 enum pipe pipe
= vlv_power_sequencer_pipe(intel_dp
);
4926 pp_on_reg
= VLV_PIPE_PP_ON_DELAYS(pipe
);
4927 pp_off_reg
= VLV_PIPE_PP_OFF_DELAYS(pipe
);
4928 pp_div_reg
= VLV_PIPE_PP_DIVISOR(pipe
);
4932 * And finally store the new values in the power sequencer. The
4933 * backlight delays are set to 1 because we do manual waits on them. For
4934 * T8, even BSpec recommends doing it. For T9, if we don't do this,
4935 * we'll end up waiting for the backlight off delay twice: once when we
4936 * do the manual sleep, and once when we disable the panel and wait for
4937 * the PP_STATUS bit to become zero.
4939 pp_on
= (seq
->t1_t3
<< PANEL_POWER_UP_DELAY_SHIFT
) |
4940 (1 << PANEL_LIGHT_ON_DELAY_SHIFT
);
4941 pp_off
= (1 << PANEL_LIGHT_OFF_DELAY_SHIFT
) |
4942 (seq
->t10
<< PANEL_POWER_DOWN_DELAY_SHIFT
);
4943 /* Compute the divisor for the pp clock, simply match the Bspec
4945 pp_div
= ((100 * div
)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT
;
4946 pp_div
|= (DIV_ROUND_UP(seq
->t11_t12
, 1000)
4947 << PANEL_POWER_CYCLE_DELAY_SHIFT
);
4949 /* Haswell doesn't have any port selection bits for the panel
4950 * power sequencer any more. */
4951 if (IS_VALLEYVIEW(dev
)) {
4952 port_sel
= PANEL_PORT_SELECT_VLV(port
);
4953 } else if (HAS_PCH_IBX(dev
) || HAS_PCH_CPT(dev
)) {
4955 port_sel
= PANEL_PORT_SELECT_DPA
;
4957 port_sel
= PANEL_PORT_SELECT_DPD
;
4962 I915_WRITE(pp_on_reg
, pp_on
);
4963 I915_WRITE(pp_off_reg
, pp_off
);
4964 I915_WRITE(pp_div_reg
, pp_div
);
4966 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
4967 I915_READ(pp_on_reg
),
4968 I915_READ(pp_off_reg
),
4969 I915_READ(pp_div_reg
));
4973 * intel_dp_set_drrs_state - program registers for RR switch to take effect
4975 * @refresh_rate: RR to be programmed
4977 * This function gets called when refresh rate (RR) has to be changed from
4978 * one frequency to another. Switches can be between high and low RR
4979 * supported by the panel or to any other RR based on media playback (in
4980 * this case, RR value needs to be passed from user space).
4982 * The caller of this function needs to take a lock on dev_priv->drrs.
4984 static void intel_dp_set_drrs_state(struct drm_device
*dev
, int refresh_rate
)
4986 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
4987 struct intel_encoder
*encoder
;
4988 struct intel_digital_port
*dig_port
= NULL
;
4989 struct intel_dp
*intel_dp
= dev_priv
->drrs
.dp
;
4990 struct intel_crtc_state
*config
= NULL
;
4991 struct intel_crtc
*intel_crtc
= NULL
;
4993 enum drrs_refresh_rate_type index
= DRRS_HIGH_RR
;
4995 if (refresh_rate
<= 0) {
4996 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5000 if (intel_dp
== NULL
) {
5001 DRM_DEBUG_KMS("DRRS not supported.\n");
5006 * FIXME: This needs proper synchronization with psr state for some
5007 * platforms that cannot have PSR and DRRS enabled at the same time.
5010 dig_port
= dp_to_dig_port(intel_dp
);
5011 encoder
= &dig_port
->base
;
5012 intel_crtc
= to_intel_crtc(encoder
->base
.crtc
);
5015 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5019 config
= intel_crtc
->config
;
5021 if (dev_priv
->drrs
.type
< SEAMLESS_DRRS_SUPPORT
) {
5022 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5026 if (intel_dp
->attached_connector
->panel
.downclock_mode
->vrefresh
==
5028 index
= DRRS_LOW_RR
;
5030 if (index
== dev_priv
->drrs
.refresh_rate_type
) {
5032 "DRRS requested for previously set RR...ignoring\n");
5036 if (!intel_crtc
->active
) {
5037 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5041 if (INTEL_INFO(dev
)->gen
>= 8 && !IS_CHERRYVIEW(dev
)) {
5044 intel_dp_set_m_n(intel_crtc
, M1_N1
);
5047 intel_dp_set_m_n(intel_crtc
, M2_N2
);
5051 DRM_ERROR("Unsupported refreshrate type\n");
5053 } else if (INTEL_INFO(dev
)->gen
> 6) {
5054 reg
= PIPECONF(intel_crtc
->config
->cpu_transcoder
);
5055 val
= I915_READ(reg
);
5057 if (index
> DRRS_HIGH_RR
) {
5058 if (IS_VALLEYVIEW(dev
))
5059 val
|= PIPECONF_EDP_RR_MODE_SWITCH_VLV
;
5061 val
|= PIPECONF_EDP_RR_MODE_SWITCH
;
5063 if (IS_VALLEYVIEW(dev
))
5064 val
&= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV
;
5066 val
&= ~PIPECONF_EDP_RR_MODE_SWITCH
;
5068 I915_WRITE(reg
, val
);
5071 dev_priv
->drrs
.refresh_rate_type
= index
;
5073 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate
);
5077 * intel_edp_drrs_enable - init drrs struct if supported
5078 * @intel_dp: DP struct
5080 * Initializes frontbuffer_bits and drrs.dp
5082 void intel_edp_drrs_enable(struct intel_dp
*intel_dp
)
5084 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
5085 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5086 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
5087 struct drm_crtc
*crtc
= dig_port
->base
.base
.crtc
;
5088 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
5090 if (!intel_crtc
->config
->has_drrs
) {
5091 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5095 mutex_lock(&dev_priv
->drrs
.mutex
);
5096 if (WARN_ON(dev_priv
->drrs
.dp
)) {
5097 DRM_ERROR("DRRS already enabled\n");
5101 dev_priv
->drrs
.busy_frontbuffer_bits
= 0;
5103 dev_priv
->drrs
.dp
= intel_dp
;
5106 mutex_unlock(&dev_priv
->drrs
.mutex
);
5110 * intel_edp_drrs_disable - Disable DRRS
5111 * @intel_dp: DP struct
5114 void intel_edp_drrs_disable(struct intel_dp
*intel_dp
)
5116 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
5117 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5118 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
5119 struct drm_crtc
*crtc
= dig_port
->base
.base
.crtc
;
5120 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
5122 if (!intel_crtc
->config
->has_drrs
)
5125 mutex_lock(&dev_priv
->drrs
.mutex
);
5126 if (!dev_priv
->drrs
.dp
) {
5127 mutex_unlock(&dev_priv
->drrs
.mutex
);
5131 if (dev_priv
->drrs
.refresh_rate_type
== DRRS_LOW_RR
)
5132 intel_dp_set_drrs_state(dev_priv
->dev
,
5133 intel_dp
->attached_connector
->panel
.
5134 fixed_mode
->vrefresh
);
5136 dev_priv
->drrs
.dp
= NULL
;
5137 mutex_unlock(&dev_priv
->drrs
.mutex
);
5139 cancel_delayed_work_sync(&dev_priv
->drrs
.work
);
5142 static void intel_edp_drrs_downclock_work(struct work_struct
*work
)
5144 struct drm_i915_private
*dev_priv
=
5145 container_of(work
, typeof(*dev_priv
), drrs
.work
.work
);
5146 struct intel_dp
*intel_dp
;
5148 mutex_lock(&dev_priv
->drrs
.mutex
);
5150 intel_dp
= dev_priv
->drrs
.dp
;
5156 * The delayed work can race with an invalidate hence we need to
5160 if (dev_priv
->drrs
.busy_frontbuffer_bits
)
5163 if (dev_priv
->drrs
.refresh_rate_type
!= DRRS_LOW_RR
)
5164 intel_dp_set_drrs_state(dev_priv
->dev
,
5165 intel_dp
->attached_connector
->panel
.
5166 downclock_mode
->vrefresh
);
5169 mutex_unlock(&dev_priv
->drrs
.mutex
);
5173 * intel_edp_drrs_invalidate - Invalidate DRRS
5175 * @frontbuffer_bits: frontbuffer plane tracking bits
5177 * When there is a disturbance on screen (due to cursor movement/time
5178 * update etc), DRRS needs to be invalidated, i.e. need to switch to
5181 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5183 void intel_edp_drrs_invalidate(struct drm_device
*dev
,
5184 unsigned frontbuffer_bits
)
5186 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5187 struct drm_crtc
*crtc
;
5190 if (dev_priv
->drrs
.type
== DRRS_NOT_SUPPORTED
)
5193 cancel_delayed_work(&dev_priv
->drrs
.work
);
5195 mutex_lock(&dev_priv
->drrs
.mutex
);
5196 if (!dev_priv
->drrs
.dp
) {
5197 mutex_unlock(&dev_priv
->drrs
.mutex
);
5201 crtc
= dp_to_dig_port(dev_priv
->drrs
.dp
)->base
.base
.crtc
;
5202 pipe
= to_intel_crtc(crtc
)->pipe
;
5204 if (dev_priv
->drrs
.refresh_rate_type
== DRRS_LOW_RR
) {
5205 intel_dp_set_drrs_state(dev_priv
->dev
,
5206 dev_priv
->drrs
.dp
->attached_connector
->panel
.
5207 fixed_mode
->vrefresh
);
5210 frontbuffer_bits
&= INTEL_FRONTBUFFER_ALL_MASK(pipe
);
5212 dev_priv
->drrs
.busy_frontbuffer_bits
|= frontbuffer_bits
;
5213 mutex_unlock(&dev_priv
->drrs
.mutex
);
5217 * intel_edp_drrs_flush - Flush DRRS
5219 * @frontbuffer_bits: frontbuffer plane tracking bits
5221 * When there is no movement on screen, DRRS work can be scheduled.
5222 * This DRRS work is responsible for setting relevant registers after a
5223 * timeout of 1 second.
5225 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5227 void intel_edp_drrs_flush(struct drm_device
*dev
,
5228 unsigned frontbuffer_bits
)
5230 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5231 struct drm_crtc
*crtc
;
5234 if (dev_priv
->drrs
.type
== DRRS_NOT_SUPPORTED
)
5237 cancel_delayed_work(&dev_priv
->drrs
.work
);
5239 mutex_lock(&dev_priv
->drrs
.mutex
);
5240 if (!dev_priv
->drrs
.dp
) {
5241 mutex_unlock(&dev_priv
->drrs
.mutex
);
5245 crtc
= dp_to_dig_port(dev_priv
->drrs
.dp
)->base
.base
.crtc
;
5246 pipe
= to_intel_crtc(crtc
)->pipe
;
5247 dev_priv
->drrs
.busy_frontbuffer_bits
&= ~frontbuffer_bits
;
5249 if (dev_priv
->drrs
.refresh_rate_type
!= DRRS_LOW_RR
&&
5250 !dev_priv
->drrs
.busy_frontbuffer_bits
)
5251 schedule_delayed_work(&dev_priv
->drrs
.work
,
5252 msecs_to_jiffies(1000));
5253 mutex_unlock(&dev_priv
->drrs
.mutex
);
5257 * DOC: Display Refresh Rate Switching (DRRS)
5259 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5260 * which enables switching between low and high refresh rates,
5261 * dynamically, based on the usage scenario. This feature is applicable
5262 * for internal panels.
5264 * Indication that the panel supports DRRS is given by the panel EDID, which
5265 * would list multiple refresh rates for one resolution.
5267 * DRRS is of 2 types - static and seamless.
5268 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5269 * (may appear as a blink on screen) and is used in dock-undock scenario.
5270 * Seamless DRRS involves changing RR without any visual effect to the user
5271 * and can be used during normal system usage. This is done by programming
5272 * certain registers.
5274 * Support for static/seamless DRRS may be indicated in the VBT based on
5275 * inputs from the panel spec.
5277 * DRRS saves power by switching to low RR based on usage scenarios.
5280 * The implementation is based on frontbuffer tracking implementation.
5281 * When there is a disturbance on the screen triggered by user activity or a
5282 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5283 * When there is no movement on screen, after a timeout of 1 second, a switch
5284 * to low RR is made.
5285 * For integration with frontbuffer tracking code,
5286 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5288 * DRRS can be further extended to support other internal panels and also
5289 * the scenario of video playback wherein RR is set based on the rate
5290 * requested by userspace.
5294 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5295 * @intel_connector: eDP connector
5296 * @fixed_mode: preferred mode of panel
5298 * This function is called only once at driver load to initialize basic
5302 * Downclock mode if panel supports it, else return NULL.
5303 * DRRS support is determined by the presence of downclock mode (apart
5304 * from VBT setting).
5306 static struct drm_display_mode
*
5307 intel_dp_drrs_init(struct intel_connector
*intel_connector
,
5308 struct drm_display_mode
*fixed_mode
)
5310 struct drm_connector
*connector
= &intel_connector
->base
;
5311 struct drm_device
*dev
= connector
->dev
;
5312 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5313 struct drm_display_mode
*downclock_mode
= NULL
;
5315 INIT_DELAYED_WORK(&dev_priv
->drrs
.work
, intel_edp_drrs_downclock_work
);
5316 mutex_init(&dev_priv
->drrs
.mutex
);
5318 if (INTEL_INFO(dev
)->gen
<= 6) {
5319 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5323 if (dev_priv
->vbt
.drrs_type
!= SEAMLESS_DRRS_SUPPORT
) {
5324 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5328 downclock_mode
= intel_find_panel_downclock
5329 (dev
, fixed_mode
, connector
);
5331 if (!downclock_mode
) {
5332 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
5336 dev_priv
->drrs
.type
= dev_priv
->vbt
.drrs_type
;
5338 dev_priv
->drrs
.refresh_rate_type
= DRRS_HIGH_RR
;
5339 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5340 return downclock_mode
;
5343 static bool intel_edp_init_connector(struct intel_dp
*intel_dp
,
5344 struct intel_connector
*intel_connector
)
5346 struct drm_connector
*connector
= &intel_connector
->base
;
5347 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
5348 struct intel_encoder
*intel_encoder
= &intel_dig_port
->base
;
5349 struct drm_device
*dev
= intel_encoder
->base
.dev
;
5350 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5351 struct drm_display_mode
*fixed_mode
= NULL
;
5352 struct drm_display_mode
*downclock_mode
= NULL
;
5354 struct drm_display_mode
*scan
;
5356 enum pipe pipe
= INVALID_PIPE
;
5358 if (!is_edp(intel_dp
))
5362 intel_edp_panel_vdd_sanitize(intel_dp
);
5363 pps_unlock(intel_dp
);
5365 /* Cache DPCD and EDID for edp. */
5366 has_dpcd
= intel_dp_get_dpcd(intel_dp
);
5369 if (intel_dp
->dpcd
[DP_DPCD_REV
] >= 0x11)
5370 dev_priv
->no_aux_handshake
=
5371 intel_dp
->dpcd
[DP_MAX_DOWNSPREAD
] &
5372 DP_NO_AUX_HANDSHAKE_LINK_TRAINING
;
5374 /* if this fails, presume the device is a ghost */
5375 DRM_INFO("failed to retrieve link info, disabling eDP\n");
5379 /* We now know it's not a ghost, init power sequence regs. */
5381 intel_dp_init_panel_power_sequencer_registers(dev
, intel_dp
);
5382 pps_unlock(intel_dp
);
5384 mutex_lock(&dev
->mode_config
.mutex
);
5385 edid
= drm_get_edid(connector
, &intel_dp
->aux
.ddc
);
5387 if (drm_add_edid_modes(connector
, edid
)) {
5388 drm_mode_connector_update_edid_property(connector
,
5390 drm_edid_to_eld(connector
, edid
);
5393 edid
= ERR_PTR(-EINVAL
);
5396 edid
= ERR_PTR(-ENOENT
);
5398 intel_connector
->edid
= edid
;
5400 /* prefer fixed mode from EDID if available */
5401 list_for_each_entry(scan
, &connector
->probed_modes
, head
) {
5402 if ((scan
->type
& DRM_MODE_TYPE_PREFERRED
)) {
5403 fixed_mode
= drm_mode_duplicate(dev
, scan
);
5404 downclock_mode
= intel_dp_drrs_init(
5405 intel_connector
, fixed_mode
);
5410 /* fallback to VBT if available for eDP */
5411 if (!fixed_mode
&& dev_priv
->vbt
.lfp_lvds_vbt_mode
) {
5412 fixed_mode
= drm_mode_duplicate(dev
,
5413 dev_priv
->vbt
.lfp_lvds_vbt_mode
);
5415 fixed_mode
->type
|= DRM_MODE_TYPE_PREFERRED
;
5417 mutex_unlock(&dev
->mode_config
.mutex
);
5419 if (IS_VALLEYVIEW(dev
)) {
5420 intel_dp
->edp_notifier
.notifier_call
= edp_notify_handler
;
5421 register_reboot_notifier(&intel_dp
->edp_notifier
);
5424 * Figure out the current pipe for the initial backlight setup.
5425 * If the current pipe isn't valid, try the PPS pipe, and if that
5426 * fails just assume pipe A.
5428 if (IS_CHERRYVIEW(dev
))
5429 pipe
= DP_PORT_TO_PIPE_CHV(intel_dp
->DP
);
5431 pipe
= PORT_TO_PIPE(intel_dp
->DP
);
5433 if (pipe
!= PIPE_A
&& pipe
!= PIPE_B
)
5434 pipe
= intel_dp
->pps_pipe
;
5436 if (pipe
!= PIPE_A
&& pipe
!= PIPE_B
)
5439 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5443 intel_panel_init(&intel_connector
->panel
, fixed_mode
, downclock_mode
);
5444 intel_connector
->panel
.backlight_power
= intel_edp_backlight_power
;
5445 intel_panel_setup_backlight(connector
, pipe
);
5451 intel_dp_init_connector(struct intel_digital_port
*intel_dig_port
,
5452 struct intel_connector
*intel_connector
)
5454 struct drm_connector
*connector
= &intel_connector
->base
;
5455 struct intel_dp
*intel_dp
= &intel_dig_port
->dp
;
5456 struct intel_encoder
*intel_encoder
= &intel_dig_port
->base
;
5457 struct drm_device
*dev
= intel_encoder
->base
.dev
;
5458 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5459 enum port port
= intel_dig_port
->port
;
5462 intel_dp
->pps_pipe
= INVALID_PIPE
;
5464 /* intel_dp vfuncs */
5465 if (INTEL_INFO(dev
)->gen
>= 9)
5466 intel_dp
->get_aux_clock_divider
= skl_get_aux_clock_divider
;
5467 else if (IS_VALLEYVIEW(dev
))
5468 intel_dp
->get_aux_clock_divider
= vlv_get_aux_clock_divider
;
5469 else if (IS_HASWELL(dev
) || IS_BROADWELL(dev
))
5470 intel_dp
->get_aux_clock_divider
= hsw_get_aux_clock_divider
;
5471 else if (HAS_PCH_SPLIT(dev
))
5472 intel_dp
->get_aux_clock_divider
= ilk_get_aux_clock_divider
;
5474 intel_dp
->get_aux_clock_divider
= i9xx_get_aux_clock_divider
;
5476 if (INTEL_INFO(dev
)->gen
>= 9)
5477 intel_dp
->get_aux_send_ctl
= skl_get_aux_send_ctl
;
5479 intel_dp
->get_aux_send_ctl
= i9xx_get_aux_send_ctl
;
5481 /* Preserve the current hw state. */
5482 intel_dp
->DP
= I915_READ(intel_dp
->output_reg
);
5483 intel_dp
->attached_connector
= intel_connector
;
5485 if (intel_dp_is_edp(dev
, port
))
5486 type
= DRM_MODE_CONNECTOR_eDP
;
5488 type
= DRM_MODE_CONNECTOR_DisplayPort
;
5491 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5492 * for DP the encoder type can be set by the caller to
5493 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5495 if (type
== DRM_MODE_CONNECTOR_eDP
)
5496 intel_encoder
->type
= INTEL_OUTPUT_EDP
;
5498 /* eDP only on port B and/or C on vlv/chv */
5499 if (WARN_ON(IS_VALLEYVIEW(dev
) && is_edp(intel_dp
) &&
5500 port
!= PORT_B
&& port
!= PORT_C
))
5503 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5504 type
== DRM_MODE_CONNECTOR_eDP
? "eDP" : "DP",
5507 drm_connector_init(dev
, connector
, &intel_dp_connector_funcs
, type
);
5508 drm_connector_helper_add(connector
, &intel_dp_connector_helper_funcs
);
5510 connector
->interlace_allowed
= true;
5511 connector
->doublescan_allowed
= 0;
5513 INIT_DELAYED_WORK(&intel_dp
->panel_vdd_work
,
5514 edp_panel_vdd_work
);
5516 intel_connector_attach_encoder(intel_connector
, intel_encoder
);
5517 drm_connector_register(connector
);
5520 intel_connector
->get_hw_state
= intel_ddi_connector_get_hw_state
;
5522 intel_connector
->get_hw_state
= intel_connector_get_hw_state
;
5523 intel_connector
->unregister
= intel_dp_connector_unregister
;
5525 /* Set up the hotplug pin. */
5528 intel_encoder
->hpd_pin
= HPD_PORT_A
;
5531 intel_encoder
->hpd_pin
= HPD_PORT_B
;
5534 intel_encoder
->hpd_pin
= HPD_PORT_C
;
5537 intel_encoder
->hpd_pin
= HPD_PORT_D
;
5543 if (is_edp(intel_dp
)) {
5545 intel_dp_init_panel_power_timestamps(intel_dp
);
5546 if (IS_VALLEYVIEW(dev
))
5547 vlv_initial_power_sequencer_setup(intel_dp
);
5549 intel_dp_init_panel_power_sequencer(dev
, intel_dp
);
5550 pps_unlock(intel_dp
);
5553 intel_dp_aux_init(intel_dp
, intel_connector
);
5555 /* init MST on ports that can support it */
5556 if (IS_HASWELL(dev
) || IS_BROADWELL(dev
) || INTEL_INFO(dev
)->gen
>= 9) {
5557 if (port
== PORT_B
|| port
== PORT_C
|| port
== PORT_D
) {
5558 intel_dp_mst_encoder_init(intel_dig_port
,
5559 intel_connector
->base
.base
.id
);
5563 if (!intel_edp_init_connector(intel_dp
, intel_connector
)) {
5564 drm_dp_aux_unregister(&intel_dp
->aux
);
5565 if (is_edp(intel_dp
)) {
5566 cancel_delayed_work_sync(&intel_dp
->panel_vdd_work
);
5568 * vdd might still be enabled do to the delayed vdd off.
5569 * Make sure vdd is actually turned off here.
5572 edp_panel_vdd_off_sync(intel_dp
);
5573 pps_unlock(intel_dp
);
5575 drm_connector_unregister(connector
);
5576 drm_connector_cleanup(connector
);
5580 intel_dp_add_properties(intel_dp
, connector
);
5582 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5583 * 0xd. Failure to do so will result in spurious interrupts being
5584 * generated on the port when a cable is not attached.
5586 if (IS_G4X(dev
) && !IS_GM45(dev
)) {
5587 u32 temp
= I915_READ(PEG_BAND_GAP_DATA
);
5588 I915_WRITE(PEG_BAND_GAP_DATA
, (temp
& ~0xf) | 0xd);
5595 intel_dp_init(struct drm_device
*dev
, int output_reg
, enum port port
)
5597 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5598 struct intel_digital_port
*intel_dig_port
;
5599 struct intel_encoder
*intel_encoder
;
5600 struct drm_encoder
*encoder
;
5601 struct intel_connector
*intel_connector
;
5603 intel_dig_port
= kzalloc(sizeof(*intel_dig_port
), GFP_KERNEL
);
5604 if (!intel_dig_port
)
5607 intel_connector
= intel_connector_alloc();
5608 if (!intel_connector
) {
5609 kfree(intel_dig_port
);
5613 intel_encoder
= &intel_dig_port
->base
;
5614 encoder
= &intel_encoder
->base
;
5616 drm_encoder_init(dev
, &intel_encoder
->base
, &intel_dp_enc_funcs
,
5617 DRM_MODE_ENCODER_TMDS
);
5619 intel_encoder
->compute_config
= intel_dp_compute_config
;
5620 intel_encoder
->disable
= intel_disable_dp
;
5621 intel_encoder
->get_hw_state
= intel_dp_get_hw_state
;
5622 intel_encoder
->get_config
= intel_dp_get_config
;
5623 intel_encoder
->suspend
= intel_dp_encoder_suspend
;
5624 if (IS_CHERRYVIEW(dev
)) {
5625 intel_encoder
->pre_pll_enable
= chv_dp_pre_pll_enable
;
5626 intel_encoder
->pre_enable
= chv_pre_enable_dp
;
5627 intel_encoder
->enable
= vlv_enable_dp
;
5628 intel_encoder
->post_disable
= chv_post_disable_dp
;
5629 } else if (IS_VALLEYVIEW(dev
)) {
5630 intel_encoder
->pre_pll_enable
= vlv_dp_pre_pll_enable
;
5631 intel_encoder
->pre_enable
= vlv_pre_enable_dp
;
5632 intel_encoder
->enable
= vlv_enable_dp
;
5633 intel_encoder
->post_disable
= vlv_post_disable_dp
;
5635 intel_encoder
->pre_enable
= g4x_pre_enable_dp
;
5636 intel_encoder
->enable
= g4x_enable_dp
;
5637 if (INTEL_INFO(dev
)->gen
>= 5)
5638 intel_encoder
->post_disable
= ilk_post_disable_dp
;
5641 intel_dig_port
->port
= port
;
5642 intel_dig_port
->dp
.output_reg
= output_reg
;
5644 intel_encoder
->type
= INTEL_OUTPUT_DISPLAYPORT
;
5645 if (IS_CHERRYVIEW(dev
)) {
5647 intel_encoder
->crtc_mask
= 1 << 2;
5649 intel_encoder
->crtc_mask
= (1 << 0) | (1 << 1);
5651 intel_encoder
->crtc_mask
= (1 << 0) | (1 << 1) | (1 << 2);
5653 intel_encoder
->cloneable
= 0;
5654 intel_encoder
->hot_plug
= intel_dp_hot_plug
;
5656 intel_dig_port
->hpd_pulse
= intel_dp_hpd_pulse
;
5657 dev_priv
->hpd_irq_port
[port
] = intel_dig_port
;
5659 if (!intel_dp_init_connector(intel_dig_port
, intel_connector
)) {
5660 drm_encoder_cleanup(encoder
);
5661 kfree(intel_dig_port
);
5662 kfree(intel_connector
);
5666 void intel_dp_mst_suspend(struct drm_device
*dev
)
5668 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5672 for (i
= 0; i
< I915_MAX_PORTS
; i
++) {
5673 struct intel_digital_port
*intel_dig_port
= dev_priv
->hpd_irq_port
[i
];
5674 if (!intel_dig_port
)
5677 if (intel_dig_port
->base
.type
== INTEL_OUTPUT_DISPLAYPORT
) {
5678 if (!intel_dig_port
->dp
.can_mst
)
5680 if (intel_dig_port
->dp
.is_mst
)
5681 drm_dp_mst_topology_mgr_suspend(&intel_dig_port
->dp
.mst_mgr
);
5686 void intel_dp_mst_resume(struct drm_device
*dev
)
5688 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5691 for (i
= 0; i
< I915_MAX_PORTS
; i
++) {
5692 struct intel_digital_port
*intel_dig_port
= dev_priv
->hpd_irq_port
[i
];
5693 if (!intel_dig_port
)
5695 if (intel_dig_port
->base
.type
== INTEL_OUTPUT_DISPLAYPORT
) {
5698 if (!intel_dig_port
->dp
.can_mst
)
5701 ret
= drm_dp_mst_topology_mgr_resume(&intel_dig_port
->dp
.mst_mgr
);
5703 intel_dp_check_mst_status(&intel_dig_port
->dp
);