/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell the Display controller supports Panel Self-Refresh on display
 * panels which have a remote frame buffer (RFB) implemented according to the
 * PSR spec in eDP 1.3. PSR allows the display to go to lower standby states
 * when the system is idle but the display is on, as it completely eliminates
 * display refresh requests to DDR memory as long as the frame buffer for that
 * display is unchanged.
 *
 * Panel Self Refresh must be supported by both hardware (source) and
 * panel (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows
 * us to power down the link and memory controller. For DSI panels the same
 * idea is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support, which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP aux message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, which is why the i915 PSR support uses the
 * software frontbuffer tracking to make sure it doesn't miss a screen
 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
 * get called by the frontbuffer tracking code. Note that because of locking
 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
 */
#include <drm/drmP.h>

#include "intel_drv.h"
#include "i915_drv.h"
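/*
 * Rough sketch of the expected calling sequence, under the assumption that
 * the frontbuffer tracking code is the only caller of the invalidate/flush
 * hooks:
 *
 *	intel_psr_enable(intel_dp, crtc_state);
 *		...
 *	intel_psr_invalidate(dev_priv, frontbuffer_bits, origin);
 *		... frontbuffer rendering ...
 *	intel_psr_flush(dev_priv, frontbuffer_bits, origin);
 *		...
 *	intel_psr_disable(intel_dp, old_crtc_state);
 */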
void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug)
{
	u32 debug_mask, mask;

	mask = EDP_PSR_ERROR(TRANSCODER_EDP);
	debug_mask = EDP_PSR_POST_EXIT(TRANSCODER_EDP) |
		     EDP_PSR_PRE_ENTRY(TRANSCODER_EDP);

	if (INTEL_GEN(dev_priv) >= 8) {
		mask |= EDP_PSR_ERROR(TRANSCODER_A) |
			EDP_PSR_ERROR(TRANSCODER_B) |
			EDP_PSR_ERROR(TRANSCODER_C);

		debug_mask |= EDP_PSR_POST_EXIT(TRANSCODER_A) |
			      EDP_PSR_PRE_ENTRY(TRANSCODER_A) |
			      EDP_PSR_POST_EXIT(TRANSCODER_B) |
			      EDP_PSR_PRE_ENTRY(TRANSCODER_B) |
			      EDP_PSR_POST_EXIT(TRANSCODER_C) |
			      EDP_PSR_PRE_ENTRY(TRANSCODER_C);
	}

	if (debug)
		mask |= debug_mask;

	WRITE_ONCE(dev_priv->psr.debug, debug);
	I915_WRITE(EDP_PSR_IMR, ~mask);
}
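/* Decode and print the PSR exit events latched in the PSR_EVENT register. */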
static void psr_event_print(u32 val, bool psr2_enabled)
{
	DRM_DEBUG_KMS("PSR exit events: 0x%x\n", val);
	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
		DRM_DEBUG_KMS("\tPSR2 watchdog timer expired\n");
	if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
		DRM_DEBUG_KMS("\tPSR2 disabled\n");
	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
		DRM_DEBUG_KMS("\tSU dirty FIFO underrun\n");
	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
		DRM_DEBUG_KMS("\tSU CRC FIFO underrun\n");
	if (val & PSR_EVENT_GRAPHICS_RESET)
		DRM_DEBUG_KMS("\tGraphics reset\n");
	if (val & PSR_EVENT_PCH_INTERRUPT)
		DRM_DEBUG_KMS("\tPCH interrupt\n");
	if (val & PSR_EVENT_MEMORY_UP)
		DRM_DEBUG_KMS("\tMemory up\n");
	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
		DRM_DEBUG_KMS("\tFront buffer modification\n");
	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
		DRM_DEBUG_KMS("\tPSR watchdog timer expired\n");
	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
		DRM_DEBUG_KMS("\tPIPE registers updated\n");
	if (val & PSR_EVENT_REGISTER_UPDATE)
		DRM_DEBUG_KMS("\tRegister updated\n");
	if (val & PSR_EVENT_HDCP_ENABLE)
		DRM_DEBUG_KMS("\tHDCP enabled\n");
	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
		DRM_DEBUG_KMS("\tKVMR session enabled\n");
	if (val & PSR_EVENT_VBI_ENABLE)
		DRM_DEBUG_KMS("\tVBI enabled\n");
	if (val & PSR_EVENT_LPSP_MODE_EXIT)
		DRM_DEBUG_KMS("\tLPSP mode exited\n");
	if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
		DRM_DEBUG_KMS("\tPSR disabled\n");
}
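/*
 * Handle the PSR interrupt bits for each transcoder that can generate them,
 * noting entry/exit timestamps and dumping the exit events on gen9+.
 */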
void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
{
	u32 transcoders = BIT(TRANSCODER_EDP);
	enum transcoder cpu_transcoder;
	ktime_t time_ns = ktime_get();

	if (INTEL_GEN(dev_priv) >= 8)
		transcoders |= BIT(TRANSCODER_A) |
			       BIT(TRANSCODER_B) |
			       BIT(TRANSCODER_C);

	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
		/* FIXME: Exit PSR and link train manually when this happens. */
		if (psr_iir & EDP_PSR_ERROR(cpu_transcoder))
			DRM_DEBUG_KMS("[transcoder %s] PSR aux error\n",
				      transcoder_name(cpu_transcoder));

		if (psr_iir & EDP_PSR_PRE_ENTRY(cpu_transcoder)) {
			dev_priv->psr.last_entry_attempt = time_ns;
			DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
				      transcoder_name(cpu_transcoder));
		}

		if (psr_iir & EDP_PSR_POST_EXIT(cpu_transcoder)) {
			dev_priv->psr.last_exit = time_ns;
			DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
				      transcoder_name(cpu_transcoder));

			if (INTEL_GEN(dev_priv) >= 9) {
				u32 val = I915_READ(PSR_EVENT(cpu_transcoder));
				bool psr2_enabled = dev_priv->psr.psr2_enabled;

				I915_WRITE(PSR_EVENT(cpu_transcoder), val);
				psr_event_print(val, psr2_enabled);
			}
		}
	}
}
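/* Query whether the sink supports the VSC SDP extension for colorimetry. */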
static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
{
	uint8_t dprx = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
			      &dprx) != 1)
		return false;
	return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
}
static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
{
	uint8_t alpm_caps = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
			      &alpm_caps) != 1)
		return false;
	return alpm_caps & DP_ALPM_CAP;
}
static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
{
	u8 val = 8; /* assume the worst if we can't read the value */

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
	else
		DRM_DEBUG_KMS("Unable to get sink synchronization latency, assuming 8 frames\n");
	return val;
}
void intel_psr_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
			 sizeof(intel_dp->psr_dpcd));

	if (!intel_dp->psr_dpcd[0])
		return;
	DRM_DEBUG_KMS("eDP panel supports PSR version %x\n",
		      intel_dp->psr_dpcd[0]);

	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
		DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n");
		return;
	}
	dev_priv->psr.sink_support = true;
	dev_priv->psr.sink_sync_latency =
		intel_dp_get_sink_sync_latency(intel_dp);

	if (INTEL_GEN(dev_priv) >= 9 &&
	    (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
		bool y_req = intel_dp->psr_dpcd[1] &
			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
		bool alpm = intel_dp_get_alpm_status(intel_dp);

		/*
		 * All panels that support PSR version 03h (PSR2 +
		 * Y-coordinate) can handle Y-coordinates in the VSC, but we
		 * are only sure that it is going to be used when required by
		 * the panel. This way the panel can do selective updates
		 * without an AUX frame sync.
		 *
		 * To support PSR version 02h, and version 03h panels without
		 * the Y-coordinate requirement, we would need to enable GTC
		 * first.
		 */
		dev_priv->psr.sink_psr2_support = y_req && alpm;
		DRM_DEBUG_KMS("PSR2 %ssupported\n",
			      dev_priv->psr.sink_psr2_support ? "" : "not ");

		if (dev_priv->psr.sink_psr2_support) {
			dev_priv->psr.colorimetry_support =
				intel_dp_get_colorimetry_status(intel_dp);
		}
	}
}
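/*
 * Prepare and send the VSC SDP that announces the PSR/PSR2 (and, when
 * supported, colorimetry) format the source is going to use.
 */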
static void intel_psr_setup_vsc(struct intel_dp *intel_dp,
				const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
	struct edp_vsc_psr psr_vsc;

	if (dev_priv->psr.psr2_enabled) {
		/* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
		memset(&psr_vsc, 0, sizeof(psr_vsc));
		psr_vsc.sdp_header.HB0 = 0;
		psr_vsc.sdp_header.HB1 = 0x7;
		if (dev_priv->psr.colorimetry_support) {
			psr_vsc.sdp_header.HB2 = 0x5;
			psr_vsc.sdp_header.HB3 = 0x13;
		} else {
			psr_vsc.sdp_header.HB2 = 0x4;
			psr_vsc.sdp_header.HB3 = 0xe;
		}
	} else {
		/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
		memset(&psr_vsc, 0, sizeof(psr_vsc));
		psr_vsc.sdp_header.HB0 = 0;
		psr_vsc.sdp_header.HB1 = 0x7;
		psr_vsc.sdp_header.HB2 = 0x2;
		psr_vsc.sdp_header.HB3 = 0x8;
	}

	intel_dig_port->write_infoframe(&intel_dig_port->base.base, crtc_state,
					DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
}
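/*
 * Pre-program the PSR AUX data registers with the DP_SET_POWER_D0 write that
 * the hardware sends on its own to wake the sink when exiting self-refresh.
 */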
static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	u32 aux_clock_divider, aux_ctl;
	int i;
	static const uint8_t aux_msg[] = {
		[0] = DP_AUX_NATIVE_WRITE << 4,
		[1] = DP_SET_POWER >> 8,
		[2] = DP_SET_POWER & 0xff,
		[3] = 1 - 1,
		[4] = DP_SET_POWER_D0,
	};
	u32 psr_aux_mask = EDP_PSR_AUX_CTL_TIME_OUT_MASK |
			   EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
			   EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
			   EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;

	BUILD_BUG_ON(sizeof(aux_msg) > 20);
	for (i = 0; i < sizeof(aux_msg); i += 4)
		I915_WRITE(EDP_PSR_AUX_DATA(i >> 2),
			   intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));

	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

	/* Start with bits set for DDI_AUX_CTL register */
	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
					     aux_clock_divider);

	/* Select only valid bits for SRD_AUX_CTL */
	aux_ctl &= psr_aux_mask;
	I915_WRITE(EDP_PSR_AUX_CTL, aux_ctl);
}
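/* Configure the sink-side DPCD registers that arm PSR (and ALPM for PSR2). */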
static void intel_psr_enable_sink(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u8 dpcd_val = DP_PSR_ENABLE;

	/* Enable ALPM at sink for psr2 */
	if (dev_priv->psr.psr2_enabled) {
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
				   DP_ALPM_ENABLE);
		dpcd_val |= DP_PSR_ENABLE_PSR2;
	}

	if (dev_priv->psr.link_standby)
		dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
	if (!dev_priv->psr.psr2_enabled && INTEL_GEN(dev_priv) >= 8)
		dpcd_val |= DP_PSR_CRC_VERIFICATION;
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}
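/*
 * Build the EDP_PSR_CTL value (idle frames, TP wakeup times, link standby)
 * and arm PSR1 in hardware.
 */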
static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 max_sleep_time = 0x1f;
	u32 val = EDP_PSR_ENABLE;

	/*
	 * Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases.
	 */
	int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);

	/*
	 * A sink_sync_latency of 8 means the source has to wait for more
	 * than 8 frames; we'll go with 9 frames for now.
	 */
	idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
	val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;

	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
	if (IS_HASWELL(dev_priv))
		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

	if (dev_priv->psr.link_standby)
		val |= EDP_PSR_LINK_STANDBY;

	if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
		val |= EDP_PSR_TP1_TIME_0us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
		val |= EDP_PSR_TP1_TIME_100us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
		val |= EDP_PSR_TP1_TIME_500us;
	else
		val |= EDP_PSR_TP1_TIME_2500us;

	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
		val |= EDP_PSR_TP2_TP3_TIME_0us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR_TP2_TP3_TIME_100us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR_TP2_TP3_TIME_500us;
	else
		val |= EDP_PSR_TP2_TP3_TIME_2500us;

	if (intel_dp_source_supports_hbr2(intel_dp) &&
	    drm_dp_tps3_supported(intel_dp->dpcd))
		val |= EDP_PSR_TP1_TP3_SEL;
	else
		val |= EDP_PSR_TP1_TP2_SEL;

	if (INTEL_GEN(dev_priv) >= 8)
		val |= EDP_PSR_CRC_ENABLE;

	val |= I915_READ(EDP_PSR_CTL) & EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK;
	I915_WRITE(EDP_PSR_CTL, val);
}
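/* Build the EDP_PSR2_CTL value and arm PSR2 with selective update tracking. */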
static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 val;

	/*
	 * Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases.
	 */
	int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);

	idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
	val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;

	/*
	 * FIXME: selective update is probably totally broken because it
	 * doesn't mesh at all with our frontbuffer tracking. And the hw
	 * alone isn't good enough.
	 */
	val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		val |= EDP_Y_COORDINATE_ENABLE;

	val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);

	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us >= 0 &&
	    dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 50)
		val |= EDP_PSR2_TP2_TIME_50us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR2_TP2_TIME_100us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR2_TP2_TIME_500us;
	else
		val |= EDP_PSR2_TP2_TIME_2500us;

	I915_WRITE(EDP_PSR2_CTL, val);
}
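/* Check whether the sink capabilities and the mode allow PSR2 to be used. */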
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
				    struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	int crtc_hdisplay = crtc_state->base.adjusted_mode.crtc_hdisplay;
	int crtc_vdisplay = crtc_state->base.adjusted_mode.crtc_vdisplay;
	int psr_max_h = 0, psr_max_v = 0;

	/*
	 * FIXME psr2_support is messed up. It's both computed
	 * dynamically during PSR enable, and extracted from sink
	 * caps during eDP detection.
	 */
	if (!dev_priv->psr.sink_psr2_support)
		return false;

	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		psr_max_h = 4096;
		psr_max_v = 2304;
	} else if (IS_GEN9(dev_priv)) {
		psr_max_h = 3640;
		psr_max_v = 2304;
	}

	if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) {
		DRM_DEBUG_KMS("PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
			      crtc_hdisplay, crtc_vdisplay,
			      psr_max_h, psr_max_v);
		return false;
	}

	return true;
}
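/*
 * Decide, while the new CRTC state is being computed, whether PSR (and
 * possibly PSR2) can be enabled for it.
 */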
void intel_psr_compute_config(struct intel_dp *intel_dp,
			      struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->base.adjusted_mode;
	int psr_setup_time;

	if (!CAN_PSR(dev_priv))
		return;

	if (!i915_modparams.enable_psr) {
		DRM_DEBUG_KMS("PSR disabled by flag\n");
		return;
	}

	/*
	 * HSW spec explicitly says PSR is tied to port A.
	 * BDW+ platforms with DDI implementation of PSR have different
	 * PSR registers per transcoder and we only implement transcoder EDP
	 * ones. Since by Display design transcoder EDP is tied to port A
	 * we can safely escape based on port A.
	 */
	if (dig_port->base.port != PORT_A) {
		DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
		return;
	}

	if (IS_HASWELL(dev_priv) &&
	    I915_READ(HSW_STEREO_3D_CTL(crtc_state->cpu_transcoder)) &
		      S3D_ENABLE) {
		DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
		return;
	}

	if (IS_HASWELL(dev_priv) &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
		return;
	}

	psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
	if (psr_setup_time < 0) {
		DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n",
			      intel_dp->psr_dpcd[1]);
		return;
	}

	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
		DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n",
			      psr_setup_time);
		return;
	}

	crtc_state->has_psr = true;
	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
	DRM_DEBUG_KMS("Enabling PSR%s\n", crtc_state->has_psr2 ? "2" : "");
}
static void intel_psr_activate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (INTEL_GEN(dev_priv) >= 9)
		WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
	WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
	WARN_ON(dev_priv->psr.active);
	lockdep_assert_held(&dev_priv->psr.lock);

	/* psr1 and psr2 are mutually exclusive. */
	if (dev_priv->psr.psr2_enabled)
		hsw_activate_psr2(intel_dp);
	else
		hsw_activate_psr1(intel_dp);

	dev_priv->psr.active = true;
}
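/*
 * Program the source side: the AUX wake message on HSW/BDW, the PSR2
 * chicken bits, and the EDP_PSR_DEBUG mask that suppresses spurious
 * hardware-tracked PSR exits.
 */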
static void intel_psr_enable_source(struct intel_dp *intel_dp,
				    const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	/*
	 * Only HSW and BDW have PSR AUX registers that need to be setup.
	 * SKL+ use hardcoded values for the PSR AUX transactions.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_psr_setup_aux(intel_dp);

	if (dev_priv->psr.psr2_enabled) {
		u32 chicken = I915_READ(CHICKEN_TRANS(cpu_transcoder));

		if (INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv))
			chicken |= (PSR2_VSC_ENABLE_PROG_HEADER
				   | PSR2_ADD_VERTICAL_LINE_COUNT);
		else
			chicken &= ~VSC_DATA_SEL_SOFTWARE_CONTROL;
		I915_WRITE(CHICKEN_TRANS(cpu_transcoder), chicken);

		I915_WRITE(EDP_PSR_DEBUG,
			   EDP_PSR_DEBUG_MASK_MEMUP |
			   EDP_PSR_DEBUG_MASK_HPD |
			   EDP_PSR_DEBUG_MASK_LPSP |
			   EDP_PSR_DEBUG_MASK_MAX_SLEEP |
			   EDP_PSR_DEBUG_MASK_DISP_REG_WRITE);
	} else {
		/*
		 * Per Spec: Avoid continuous PSR exit by masking MEMUP and
		 * HPD. Also mask LPSP to avoid a dependency on other drivers
		 * that might block runtime_pm, besides preventing other hw
		 * tracking issues now that we can rely on frontbuffer
		 * tracking.
		 */
		I915_WRITE(EDP_PSR_DEBUG,
			   EDP_PSR_DEBUG_MASK_MEMUP |
			   EDP_PSR_DEBUG_MASK_HPD |
			   EDP_PSR_DEBUG_MASK_LPSP |
			   EDP_PSR_DEBUG_MASK_DISP_REG_WRITE |
			   EDP_PSR_DEBUG_MASK_MAX_SLEEP);
	}
}
/**
 * intel_psr_enable - Enable PSR
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 *
 * This function can only be called after the pipe is fully trained and enabled.
 */
void intel_psr_enable(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->has_psr)
		return;

	if (WARN_ON(!CAN_PSR(dev_priv)))
		return;

	WARN_ON(dev_priv->drrs.dp);
	mutex_lock(&dev_priv->psr.lock);
	if (dev_priv->psr.enabled) {
		DRM_DEBUG_KMS("PSR already in use\n");
		goto unlock;
	}

	dev_priv->psr.psr2_enabled = crtc_state->has_psr2;
	dev_priv->psr.busy_frontbuffer_bits = 0;

	intel_psr_setup_vsc(intel_dp, crtc_state);
	intel_psr_enable_sink(intel_dp);
	intel_psr_enable_source(intel_dp, crtc_state);
	dev_priv->psr.enabled = intel_dp;

	intel_psr_activate(intel_dp);

unlock:
	mutex_unlock(&dev_priv->psr.lock);
}
static void
intel_psr_disable_source(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (dev_priv->psr.active) {
		i915_reg_t psr_status;
		u32 psr_status_mask;

		if (dev_priv->psr.psr2_enabled) {
			psr_status = EDP_PSR2_STATUS;
			psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;

			I915_WRITE(EDP_PSR2_CTL,
				   I915_READ(EDP_PSR2_CTL) &
				   ~(EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE));
		} else {
			psr_status = EDP_PSR_STATUS;
			psr_status_mask = EDP_PSR_STATUS_STATE_MASK;

			I915_WRITE(EDP_PSR_CTL,
				   I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
		}

		/* Wait till PSR is idle */
		if (intel_wait_for_register(dev_priv,
					    psr_status, psr_status_mask, 0,
					    2000))
			DRM_ERROR("Timed out waiting for PSR Idle State\n");

		dev_priv->psr.active = false;
	} else {
		if (dev_priv->psr.psr2_enabled)
			WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
		else
			WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
	}
}
static void intel_psr_disable_locked(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	lockdep_assert_held(&dev_priv->psr.lock);

	if (!dev_priv->psr.enabled)
		return;

	intel_psr_disable_source(intel_dp);

	/* Disable PSR on Sink */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);

	dev_priv->psr.enabled = NULL;
}
/**
 * intel_psr_disable - Disable PSR
 * @intel_dp: Intel DP
 * @old_crtc_state: old CRTC state
 *
 * This function needs to be called before the pipe is disabled.
 */
void intel_psr_disable(struct intel_dp *intel_dp,
		       const struct intel_crtc_state *old_crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!old_crtc_state->has_psr)
		return;

	if (WARN_ON(!CAN_PSR(dev_priv)))
		return;

	mutex_lock(&dev_priv->psr.lock);
	intel_psr_disable_locked(intel_dp);
	mutex_unlock(&dev_priv->psr.lock);
	cancel_work_sync(&dev_priv->psr.work);
}
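/* Wait for PSR to reach the idle state; called from the atomic pipe update path. */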
int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg;
	u32 mask;

	if (!new_crtc_state->has_psr)
		return 0;

	/*
	 * The sole user right now is intel_pipe_update_start(),
	 * which won't race with psr_enable/disable, which is
	 * where psr2_enabled is written to. So, we don't need
	 * to acquire the psr.lock. More importantly, we want the
	 * latency inside intel_pipe_update_start() to be as low
	 * as possible, so no need to acquire psr.lock when it is
	 * not needed and will induce latencies in the atomic
	 * update path.
	 */
	if (dev_priv->psr.psr2_enabled) {
		reg = EDP_PSR2_STATUS;
		mask = EDP_PSR2_STATUS_STATE_MASK;
	} else {
		reg = EDP_PSR_STATUS;
		mask = EDP_PSR_STATUS_STATE_MASK;
	}

	/*
	 * Max time for PSR to idle = Inverse of the refresh rate +
	 * 6 ms of exit training time + 1.5 ms of aux channel
	 * handshake. 50 ms is defensive enough to cover everything.
	 */
	return intel_wait_for_register(dev_priv, reg, mask,
				       EDP_PSR_STATUS_STATE_IDLE, 50);
}
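/*
 * Called with psr.lock held: temporarily drops the lock while waiting for
 * the hardware to idle, then rechecks that PSR is still enabled and wanted.
 */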
static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
{
	struct intel_dp *intel_dp;
	i915_reg_t reg;
	u32 mask;
	int err;

	intel_dp = dev_priv->psr.enabled;
	if (!intel_dp)
		return false;

	if (dev_priv->psr.psr2_enabled) {
		reg = EDP_PSR2_STATUS;
		mask = EDP_PSR2_STATUS_STATE_MASK;
	} else {
		reg = EDP_PSR_STATUS;
		mask = EDP_PSR_STATUS_STATE_MASK;
	}

	mutex_unlock(&dev_priv->psr.lock);

	err = intel_wait_for_register(dev_priv, reg, mask, 0, 50);
	if (err)
		DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");

	/* After the unlocked wait, verify that PSR is still wanted! */
	mutex_lock(&dev_priv->psr.lock);
	return err == 0 && dev_priv->psr.enabled;
}
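/* Worker that re-activates PSR once the hardware has idled after a flush. */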
static void intel_psr_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), psr.work);

	mutex_lock(&dev_priv->psr.lock);

	if (!dev_priv->psr.enabled)
		goto unlock;

	/*
	 * We have to make sure PSR is ready for re-enable,
	 * otherwise it stays disabled until the next full
	 * enable/disable cycle. PSR might take some time to get
	 * fully disabled and be ready for re-enable.
	 */
	if (!__psr_wait_for_idle_locked(dev_priv))
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck. Since psr_flush first clears this and then reschedules we
	 * won't ever miss a flush when bailing out here.
	 */
	if (dev_priv->psr.busy_frontbuffer_bits || dev_priv->psr.active)
		goto unlock;

	intel_psr_activate(dev_priv->psr.enabled);
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}
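/*
 * Kick the hardware out of self-refresh by clearing the enable bit; the
 * sink-side configuration is left untouched.
 */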
static void intel_psr_exit(struct drm_i915_private *dev_priv)
{
	u32 val;

	if (!dev_priv->psr.active)
		return;

	if (dev_priv->psr.psr2_enabled) {
		val = I915_READ(EDP_PSR2_CTL);
		WARN_ON(!(val & EDP_PSR2_ENABLE));
		I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
	} else {
		val = I915_READ(EDP_PSR_CTL);
		WARN_ON(!(val & EDP_PSR_ENABLE));
		I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
	}
	dev_priv->psr.active = false;
}
/**
 * intel_psr_invalidate - Invalidate PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the invalidate
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
			  unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (!CAN_PSR(dev_priv))
		return;

	if (origin == ORIGIN_FLIP)
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;

	if (frontbuffer_bits)
		intel_psr_exit(dev_priv);

	mutex_unlock(&dev_priv->psr.lock);
}
/**
 * intel_psr_flush - Flush PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the flush
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering has completed and flushed out to memory. PSR
 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_flush(struct drm_i915_private *dev_priv,
		     unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (!CAN_PSR(dev_priv))
		return;

	if (origin == ORIGIN_FLIP)
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* By definition flush = invalidate + flush */
	if (frontbuffer_bits) {
		if (dev_priv->psr.psr2_enabled) {
			intel_psr_exit(dev_priv);
		} else {
			/*
			 * Display WA #0884: all
			 * This documented WA for bxt can be safely applied
			 * broadly so we can force HW tracking to exit PSR
			 * instead of disabling and re-enabling.
			 * The workaround tells us to write 0 to
			 * CUR_SURFLIVE_A, but it makes more sense to write
			 * to the current active pipe's CURSURFLIVE.
			 */
			I915_WRITE(CURSURFLIVE(pipe), 0);
		}
	}

	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
		schedule_work(&dev_priv->psr.work);
	mutex_unlock(&dev_priv->psr.lock);
}
/**
 * intel_psr_init - Init basic PSR work and mutex.
 * @dev_priv: i915 device private
 *
 * This function is called only once at driver load to initialize basic
 * PSR stuff.
 */
void intel_psr_init(struct drm_i915_private *dev_priv)
{
	if (!HAS_PSR(dev_priv))
		return;

	dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
		HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;

	if (!dev_priv->psr.sink_support)
		return;

	if (i915_modparams.enable_psr == -1) {
		i915_modparams.enable_psr = dev_priv->vbt.psr.enable;

		/* Per platform default: all disabled. */
		i915_modparams.enable_psr = 0;
	}

	/* Set link_standby x link_off defaults */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		/* HSW and BDW require workarounds that we don't implement. */
		dev_priv->psr.link_standby = false;
	else
		/* For new platforms let's respect VBT back again */
		dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;

	INIT_WORK(&dev_priv->psr.work, intel_psr_work);
	mutex_init(&dev_priv->psr.lock);
}
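/*
 * Short HPD pulse handler: read the sink's PSR status/error registers and
 * disable PSR if the sink reports an internal error or a PSR error.
 */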
void intel_psr_short_pulse(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_psr *psr = &dev_priv->psr;
	u8 val;
	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
			  DP_PSR_LINK_CRC_ERROR;

	if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
		return;

	mutex_lock(&psr->lock);

	if (psr->enabled != intel_dp)
		goto exit;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val) != 1) {
		DRM_ERROR("PSR_STATUS dpcd read failed\n");
		goto exit;
	}

	if ((val & DP_PSR_SINK_STATE_MASK) == DP_PSR_SINK_INTERNAL_ERROR) {
		DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n");
		intel_psr_disable_locked(intel_dp);
	}

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ERROR_STATUS, &val) != 1) {
		DRM_ERROR("PSR_ERROR_STATUS dpcd read failed\n");
		goto exit;
	}

	if (val & DP_PSR_RFB_STORAGE_ERROR)
		DRM_DEBUG_KMS("PSR RFB storage error, disabling PSR\n");
	if (val & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
		DRM_DEBUG_KMS("PSR VSC SDP uncorrectable error, disabling PSR\n");
	if (val & DP_PSR_LINK_CRC_ERROR)
		DRM_ERROR("PSR Link CRC error, disabling PSR\n");

	if (val & ~errors)
		DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n",
			  val & ~errors);
	if (val & errors)
		intel_psr_disable_locked(intel_dp);
	/* clear status register */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, val);

	/* TODO: handle PSR2 errors */
exit:
	mutex_unlock(&psr->lock);
}