/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_atomic_helper.h>

#include "display/intel_dp.h"

#include "intel_atomic.h"
#include "intel_display_types.h"
#include "intel_psr.h"
#include "intel_sprite.h"

/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell, the display controller supports Panel Self-Refresh on
 * display panels which have a remote frame buffer (RFB) implemented according
 * to the PSR spec in eDP 1.3. PSR allows the display to go to lower standby
 * states when the system is idle but the display is on, as it eliminates
 * display refresh requests to DDR memory completely as long as the frame
 * buffer for that display is unchanged.
 *
 * Panel Self Refresh must be supported by both Hardware (source) and
 * Panel (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows
 * us to power down the link and memory controller. For DSI panels the same
 * idea is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP aux message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, hence why the i915 PSR support uses the
 * software frontbuffer tracking to make sure it doesn't miss a screen
 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
 * get called by the frontbuffer tracking code. Note that because of locking
 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
 */

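/*
 * In short, the flow described above is:
 *   frontbuffer write  -> intel_psr_invalidate() -> PSR exit
 *   frontbuffer flush  -> intel_psr_flush()      -> PSR re-activation
 *                                                   (via dev_priv->psr.work)
 */
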
static bool psr_global_enabled(u32 debug)
{
	switch (debug & I915_PSR_DEBUG_MODE_MASK) {
	case I915_PSR_DEBUG_DEFAULT:
		return i915_modparams.enable_psr;
	case I915_PSR_DEBUG_DISABLE:
		return false;
	default:
		return true;
	}
}

static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
			       const struct intel_crtc_state *crtc_state)
{
	/* Cannot enable DSC and PSR2 simultaneously */
	WARN_ON(crtc_state->dsc.compression_enable &&
		crtc_state->has_psr2);

	switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
	case I915_PSR_DEBUG_DISABLE:
	case I915_PSR_DEBUG_FORCE_PSR1:
		return false;
	default:
		return crtc_state->has_psr2;
	}
}

static void psr_irq_control(struct drm_i915_private *dev_priv)
{
	enum transcoder trans_shift;
	u32 mask, val;
	i915_reg_t imr_reg;

	/*
	 * gen12+ has registers relative to transcoder and one per transcoder
	 * using the same bit definition: handle it as TRANSCODER_EDP to force
	 * 0 shift in bit definition
	 */
	if (INTEL_GEN(dev_priv) >= 12) {
		trans_shift = 0;
		imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder);
	} else {
		trans_shift = dev_priv->psr.transcoder;
		imr_reg = EDP_PSR_IMR;
	}

	mask = EDP_PSR_ERROR(trans_shift);
	if (dev_priv->psr.debug & I915_PSR_DEBUG_IRQ)
		mask |= EDP_PSR_POST_EXIT(trans_shift) |
			EDP_PSR_PRE_ENTRY(trans_shift);

	/* Warning: it is masking/setting reserved bits too */
	val = I915_READ(imr_reg);
	val &= ~EDP_PSR_TRANS_MASK(trans_shift);
	val |= ~mask;
	I915_WRITE(imr_reg, val);
}

static void psr_event_print(u32 val, bool psr2_enabled)
{
	DRM_DEBUG_KMS("PSR exit events: 0x%x\n", val);
	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
		DRM_DEBUG_KMS("\tPSR2 watchdog timer expired\n");
	if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
		DRM_DEBUG_KMS("\tPSR2 disabled\n");
	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
		DRM_DEBUG_KMS("\tSU dirty FIFO underrun\n");
	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
		DRM_DEBUG_KMS("\tSU CRC FIFO underrun\n");
	if (val & PSR_EVENT_GRAPHICS_RESET)
		DRM_DEBUG_KMS("\tGraphics reset\n");
	if (val & PSR_EVENT_PCH_INTERRUPT)
		DRM_DEBUG_KMS("\tPCH interrupt\n");
	if (val & PSR_EVENT_MEMORY_UP)
		DRM_DEBUG_KMS("\tMemory up\n");
	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
		DRM_DEBUG_KMS("\tFront buffer modification\n");
	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
		DRM_DEBUG_KMS("\tPSR watchdog timer expired\n");
	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
		DRM_DEBUG_KMS("\tPIPE registers updated\n");
	if (val & PSR_EVENT_REGISTER_UPDATE)
		DRM_DEBUG_KMS("\tRegister updated\n");
	if (val & PSR_EVENT_HDCP_ENABLE)
		DRM_DEBUG_KMS("\tHDCP enabled\n");
	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
		DRM_DEBUG_KMS("\tKVMR session enabled\n");
	if (val & PSR_EVENT_VBI_ENABLE)
		DRM_DEBUG_KMS("\tVBI enabled\n");
	if (val & PSR_EVENT_LPSP_MODE_EXIT)
		DRM_DEBUG_KMS("\tLPSP mode exited\n");
	if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
		DRM_DEBUG_KMS("\tPSR disabled\n");
}

void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
{
	enum transcoder cpu_transcoder = dev_priv->psr.transcoder;
	enum transcoder trans_shift;
	i915_reg_t imr_reg;
	ktime_t time_ns = ktime_get();

	if (INTEL_GEN(dev_priv) >= 12) {
		trans_shift = 0;
		imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder);
	} else {
		trans_shift = dev_priv->psr.transcoder;
		imr_reg = EDP_PSR_IMR;
	}

	if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) {
		dev_priv->psr.last_entry_attempt = time_ns;
		DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
			      transcoder_name(cpu_transcoder));
	}

	if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) {
		dev_priv->psr.last_exit = time_ns;
		DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
			      transcoder_name(cpu_transcoder));

		if (INTEL_GEN(dev_priv) >= 9) {
			u32 val = I915_READ(PSR_EVENT(cpu_transcoder));
			bool psr2_enabled = dev_priv->psr.psr2_enabled;

			I915_WRITE(PSR_EVENT(cpu_transcoder), val);
			psr_event_print(val, psr2_enabled);
		}
	}

	if (psr_iir & EDP_PSR_ERROR(trans_shift)) {
		u32 val;

		DRM_WARN("[transcoder %s] PSR aux error\n",
			 transcoder_name(cpu_transcoder));

		dev_priv->psr.irq_aux_error = true;

		/*
		 * If this interrupt is not masked it will keep firing so fast
		 * that it prevents the scheduled work from running.
		 * Also, after a PSR error we don't want to arm PSR again, so
		 * we don't care about unmasking the interrupt or clearing
		 * irq_aux_error.
		 */
		val = I915_READ(imr_reg);
		val |= EDP_PSR_ERROR(trans_shift);
		I915_WRITE(imr_reg, val);

		schedule_work(&dev_priv->psr.work);
	}
}

static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
{
	u8 alpm_caps = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
			      &alpm_caps) != 1)
		return false;

	return alpm_caps & DP_ALPM_CAP;
}

static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
{
	u8 val = 8; /* assume the worst if we can't read the value */

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
	else
		DRM_DEBUG_KMS("Unable to get sink synchronization latency, assuming 8 frames\n");

	return val;
}

static u16 intel_dp_get_su_x_granulartiy(struct intel_dp *intel_dp)
{
	u16 val;
	ssize_t r;

	/*
	 * Return the default X granularity if granularity is not required or
	 * if the DPCD read fails.
	 */
	if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED))
		return 4;

	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &val, 2);
	if (r != 2)
		DRM_DEBUG_KMS("Unable to read DP_PSR2_SU_X_GRANULARITY\n");

	/*
	 * Spec says that if the value read is 0 the default granularity should
	 * be used instead.
	 */
	if (r != 2 || val == 0)
		return 4;

	return val;
}

void intel_psr_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	if (dev_priv->psr.dp) {
		DRM_WARN("More than one eDP panel found, PSR support should be extended\n");
		return;
	}

	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
			 sizeof(intel_dp->psr_dpcd));

	if (!intel_dp->psr_dpcd[0])
		return;
	DRM_DEBUG_KMS("eDP panel supports PSR version %x\n",
		      intel_dp->psr_dpcd[0]);

	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
		DRM_DEBUG_KMS("PSR support not currently available for this panel\n");
		return;
	}

	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
		DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n");
		return;
	}

	dev_priv->psr.sink_support = true;
	dev_priv->psr.sink_sync_latency =
		intel_dp_get_sink_sync_latency(intel_dp);

	dev_priv->psr.dp = intel_dp;

	if (INTEL_GEN(dev_priv) >= 9 &&
	    (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
		bool y_req = intel_dp->psr_dpcd[1] &
			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
		bool alpm = intel_dp_get_alpm_status(intel_dp);

		/*
		 * All panels that support PSR version 03h (PSR2 +
		 * Y-coordinate) can handle Y-coordinates in VSC, but we are
		 * only sure that it is going to be used when required by the
		 * panel. This way the panel is capable of doing selective
		 * updates without an AUX frame sync.
		 *
		 * To support PSR version 02h and PSR version 03h panels
		 * without the Y-coordinate requirement we would need to
		 * enable sink interrupts to have the frame sync.
		 */
		dev_priv->psr.sink_psr2_support = y_req && alpm;
		DRM_DEBUG_KMS("PSR2 %ssupported\n",
			      dev_priv->psr.sink_psr2_support ? "" : "not ");

		if (dev_priv->psr.sink_psr2_support) {
			dev_priv->psr.colorimetry_support =
				intel_dp_get_colorimetry_status(intel_dp);
			dev_priv->psr.su_x_granularity =
				intel_dp_get_su_x_granulartiy(intel_dp);
		}
	}
}

static void intel_psr_setup_vsc(struct intel_dp *intel_dp,
				const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct dp_sdp psr_vsc;

	if (dev_priv->psr.psr2_enabled) {
		/* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
		memset(&psr_vsc, 0, sizeof(psr_vsc));
		psr_vsc.sdp_header.HB0 = 0;
		psr_vsc.sdp_header.HB1 = 0x7;
		if (dev_priv->psr.colorimetry_support) {
			psr_vsc.sdp_header.HB2 = 0x5;
			psr_vsc.sdp_header.HB3 = 0x13;
		} else {
			psr_vsc.sdp_header.HB2 = 0x4;
			psr_vsc.sdp_header.HB3 = 0xe;
		}
	} else {
		/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
		memset(&psr_vsc, 0, sizeof(psr_vsc));
		psr_vsc.sdp_header.HB0 = 0;
		psr_vsc.sdp_header.HB1 = 0x7;
		psr_vsc.sdp_header.HB2 = 0x2;
		psr_vsc.sdp_header.HB3 = 0x8;
	}

	intel_dig_port->write_infoframe(&intel_dig_port->base,
					crtc_state,
					DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
}

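/*
 * Pre-program the AUX message (a native DPCD write of DP_SET_POWER = D0) and
 * the AUX control bits that the PSR hardware itself uses to wake the sink,
 * so no software-issued AUX transaction is needed on PSR exit.
 */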
static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 aux_clock_divider, aux_ctl;
	int i;
	static const u8 aux_msg[] = {
		[0] = DP_AUX_NATIVE_WRITE << 4,
		[1] = DP_SET_POWER >> 8,
		[2] = DP_SET_POWER & 0xff,
		[3] = 1 - 1,
		[4] = DP_SET_POWER_D0,
	};
	u32 psr_aux_mask = EDP_PSR_AUX_CTL_TIME_OUT_MASK |
			   EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
			   EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
			   EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;

	BUILD_BUG_ON(sizeof(aux_msg) > 20);
	for (i = 0; i < sizeof(aux_msg); i += 4)
		I915_WRITE(EDP_PSR_AUX_DATA(dev_priv->psr.transcoder, i >> 2),
			   intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));

	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

	/* Start with bits set for DDI_AUX_CTL register */
	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
					     aux_clock_divider);

	/* Select only valid bits for SRD_AUX_CTL */
	aux_ctl &= psr_aux_mask;
	I915_WRITE(EDP_PSR_AUX_CTL(dev_priv->psr.transcoder), aux_ctl);
}

static void intel_psr_enable_sink(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 dpcd_val = DP_PSR_ENABLE;

	/* Enable ALPM at sink for psr2 */
	if (dev_priv->psr.psr2_enabled) {
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
				   DP_ALPM_ENABLE |
				   DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);

		dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
	} else {
		if (dev_priv->psr.link_standby)
			dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;

		if (INTEL_GEN(dev_priv) >= 8)
			dpcd_val |= DP_PSR_CRC_VERIFICATION;
	}

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}

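/*
 * Translate the VBT TP1/TP2/TP3 wakeup times and the source/sink training
 * pattern capabilities into the TP time and TP select fields of EDP_PSR_CTL.
 */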
static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val = 0;

	if (INTEL_GEN(dev_priv) >= 11)
		val |= EDP_PSR_TP4_TIME_0US;

	if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
		val |= EDP_PSR_TP1_TIME_0us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
		val |= EDP_PSR_TP1_TIME_100us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
		val |= EDP_PSR_TP1_TIME_500us;
	else
		val |= EDP_PSR_TP1_TIME_2500us;

	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
		val |= EDP_PSR_TP2_TP3_TIME_0us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR_TP2_TP3_TIME_100us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR_TP2_TP3_TIME_500us;
	else
		val |= EDP_PSR_TP2_TP3_TIME_2500us;

	if (intel_dp_source_supports_hbr2(intel_dp) &&
	    drm_dp_tps3_supported(intel_dp->dpcd))
		val |= EDP_PSR_TP1_TP3_SEL;
	else
		val |= EDP_PSR_TP1_TP2_SEL;

	return val;
}

static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 max_sleep_time = 0x1f;
	u32 val = EDP_PSR_ENABLE;

	/* Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases.
	 */
	int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);

	/* sink_sync_latency of 8 means source has to wait for more than 8
	 * frames, we'll go with 9 frames for now
	 */
	idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
	val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;

	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
	if (IS_HASWELL(dev_priv))
		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

	if (dev_priv->psr.link_standby)
		val |= EDP_PSR_LINK_STANDBY;

	val |= intel_psr1_get_tp_time(intel_dp);

	if (INTEL_GEN(dev_priv) >= 8)
		val |= EDP_PSR_CRC_ENABLE;

	val |= (I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)) &
		EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK);
	I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), val);
}

static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val;

	/* Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases.
	 */
	int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);

	idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
	val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;

	val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		val |= EDP_Y_COORDINATE_ENABLE;

	val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);

	if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
	    dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
		val |= EDP_PSR2_TP2_TIME_50us;
	else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR2_TP2_TIME_100us;
	else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR2_TP2_TIME_500us;
	else
		val |= EDP_PSR2_TP2_TIME_2500us;

	/*
	 * PSR2 HW incorrectly uses EDP_PSR_TP1_TP3_SEL and BSpec recommends
	 * keeping this bit unset while PSR2 is enabled.
	 */
	I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), 0);

	I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
}

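/*
 * PSR2 is tied to a single transcoder: transcoder A on gen12+, the eDP
 * transcoder on gen9-gen11, and is not available on pre-gen9 hardware.
 */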
static bool
transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans)
{
	if (INTEL_GEN(dev_priv) < 9)
		return false;
	else if (INTEL_GEN(dev_priv) >= 12)
		return trans == TRANSCODER_A;
	else
		return trans == TRANSCODER_EDP;
}

static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
{
	if (!cstate || !cstate->hw.active)
		return 0;

	return DIV_ROUND_UP(1000 * 1000,
			    drm_mode_vrefresh(&cstate->hw.adjusted_mode));
}

static void psr2_program_idle_frames(struct drm_i915_private *dev_priv,
				     u32 idle_frames)
{
	u32 val;

	idle_frames <<= EDP_PSR2_IDLE_FRAME_SHIFT;
	val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
	val &= ~EDP_PSR2_IDLE_FRAME_MASK;
	val |= idle_frames;
	I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
}

static void tgl_psr2_enable_dc3co(struct drm_i915_private *dev_priv)
{
	psr2_program_idle_frames(dev_priv, 0);
	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
}

static void tgl_psr2_disable_dc3co(struct drm_i915_private *dev_priv)
{
	int idle_frames;

	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
	/*
	 * Restore the PSR2 idle frame count. Let's use 6 as the minimum to
	 * cover all known cases including the off-by-one issue that HW has
	 * in some cases.
	 */
	idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
	idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
	psr2_program_idle_frames(dev_priv, idle_frames);
}

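/*
 * Delayed work that drops back from DC3CO to the deeper DC5/DC6 target state
 * once the display has stayed idle long enough that no new flush rescheduled
 * the work.
 */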
static void tgl_dc5_idle_thread(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), psr.idle_work.work);

	mutex_lock(&dev_priv->psr.lock);
	/* If delayed work is pending, it is not idle */
	if (delayed_work_pending(&dev_priv->psr.idle_work))
		goto unlock;

	DRM_DEBUG_KMS("DC5/6 idle thread\n");
	tgl_psr2_disable_dc3co(dev_priv);
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

static void tgl_disallow_dc3co_on_psr2_exit(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->psr.dc3co_enabled)
		return;

	cancel_delayed_work(&dev_priv->psr.idle_work);
	/* Before PSR2 exit disallow dc3co */
	tgl_psr2_disable_dc3co(dev_priv);
}

static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
				    struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
	int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
	int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;

	if (!dev_priv->psr.sink_psr2_support)
		return false;

	if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
		DRM_DEBUG_KMS("PSR2 not supported in transcoder %s\n",
			      transcoder_name(crtc_state->cpu_transcoder));
		return false;
	}

	/*
	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
	 * resolution requires DSC to be enabled, priority is given to DSC
	 * over PSR2.
	 */
	if (crtc_state->dsc.compression_enable) {
		DRM_DEBUG_KMS("PSR2 cannot be enabled since DSC is enabled\n");
		return false;
	}

	if (INTEL_GEN(dev_priv) >= 12) {
		psr_max_h = 5120;
		psr_max_v = 3200;
		max_bpp = 30;
	} else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		psr_max_h = 4096;
		psr_max_v = 2304;
		max_bpp = 24;
	} else if (IS_GEN(dev_priv, 9)) {
		psr_max_h = 3640;
		psr_max_v = 2304;
		max_bpp = 24;
	}

	if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) {
		DRM_DEBUG_KMS("PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
			      crtc_hdisplay, crtc_vdisplay,
			      psr_max_h, psr_max_v);
		return false;
	}

	if (crtc_state->pipe_bpp > max_bpp) {
		DRM_DEBUG_KMS("PSR2 not enabled, pipe bpp %d > max supported %d\n",
			      crtc_state->pipe_bpp, max_bpp);
		return false;
	}

	/*
	 * HW sends SU blocks of size four scan lines, which means the starting
	 * X coordinate and Y granularity requirements will always be met. We
	 * only need to validate that the SU block width is a multiple of
	 * the X granularity.
	 */
	if (crtc_hdisplay % dev_priv->psr.su_x_granularity) {
		DRM_DEBUG_KMS("PSR2 not enabled, hdisplay(%d) not multiple of %d\n",
			      crtc_hdisplay, dev_priv->psr.su_x_granularity);
		return false;
	}

	if (crtc_state->crc_enabled) {
		DRM_DEBUG_KMS("PSR2 not enabled because it would inhibit pipe CRC calculation\n");
		return false;
	}

	return true;
}

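/**
 * intel_psr_compute_config - Compute PSR state for a new CRTC state
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 *
 * Checks the source and sink side PSR constraints and sets
 * crtc_state->has_psr and crtc_state->has_psr2 accordingly.
 */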
void intel_psr_compute_config(struct intel_dp *intel_dp,
			      struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	int psr_setup_time;

	if (!CAN_PSR(dev_priv))
		return;

	if (intel_dp != dev_priv->psr.dp)
		return;

	/*
	 * HSW spec explicitly says PSR is tied to port A.
	 * BDW+ platforms have an instance of PSR registers per transcoder but
	 * for now it only supports one instance of PSR, so let's keep it
	 * hardcoded to PORT_A.
	 */
	if (dig_port->base.port != PORT_A) {
		DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
		return;
	}

	if (dev_priv->psr.sink_not_reliable) {
		DRM_DEBUG_KMS("PSR sink implementation is not reliable\n");
		return;
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		DRM_DEBUG_KMS("PSR condition failed: Interlaced mode enabled\n");
		return;
	}

	psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
	if (psr_setup_time < 0) {
		DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n",
			      intel_dp->psr_dpcd[1]);
		return;
	}

	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
		DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n",
			      psr_setup_time);
		return;
	}

	crtc_state->has_psr = true;
	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
}

static void intel_psr_activate(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder))
		WARN_ON(I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder)) & EDP_PSR2_ENABLE);

	WARN_ON(I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)) & EDP_PSR_ENABLE);
	WARN_ON(dev_priv->psr.active);
	lockdep_assert_held(&dev_priv->psr.lock);

	/* psr1 and psr2 are mutually exclusive. */
	if (dev_priv->psr.psr2_enabled)
		hsw_activate_psr2(intel_dp);
	else
		hsw_activate_psr1(intel_dp);

	dev_priv->psr.active = true;
}

static void intel_psr_enable_source(struct intel_dp *intel_dp,
				    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	u32 mask;

	/* Only HSW and BDW have PSR AUX registers that need to be setup.
	 * SKL+ use hardcoded values for the PSR AUX transactions.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_psr_setup_aux(intel_dp);

	if (dev_priv->psr.psr2_enabled && (IS_GEN(dev_priv, 9) &&
					   !IS_GEMINILAKE(dev_priv))) {
		i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
		u32 chicken = I915_READ(reg);

		chicken |= PSR2_VSC_ENABLE_PROG_HEADER |
			   PSR2_ADD_VERTICAL_LINE_COUNT;
		I915_WRITE(reg, chicken);
	}

	/*
	 * Per spec: avoid continuous PSR exit by masking MEMUP and HPD. Also
	 * mask LPSP to avoid a dependency on other drivers that might block
	 * runtime_pm, besides preventing other HW tracking issues, now that
	 * we can rely on frontbuffer tracking.
	 */
	mask = EDP_PSR_DEBUG_MASK_MEMUP |
	       EDP_PSR_DEBUG_MASK_HPD |
	       EDP_PSR_DEBUG_MASK_LPSP |
	       EDP_PSR_DEBUG_MASK_MAX_SLEEP;

	if (INTEL_GEN(dev_priv) < 11)
		mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;

	I915_WRITE(EDP_PSR_DEBUG(dev_priv->psr.transcoder), mask);

	psr_irq_control(dev_priv);
}

static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
				    const struct intel_crtc_state *crtc_state)
{
	struct intel_dp *intel_dp = dev_priv->psr.dp;
	u32 val;

	WARN_ON(dev_priv->psr.enabled);

	dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
	dev_priv->psr.busy_frontbuffer_bits = 0;
	dev_priv->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
	dev_priv->psr.dc3co_enabled = !!crtc_state->dc3co_exitline;
	dev_priv->psr.dc3co_exit_delay = intel_get_frame_time_us(crtc_state);
	dev_priv->psr.transcoder = crtc_state->cpu_transcoder;

	/*
	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
	 * will still keep the error set even after the reset done in the
	 * irq_preinstall and irq_uninstall hooks.
	 * Enabling in this situation causes the screen to freeze the first
	 * time that PSR HW tries to activate, so let's keep PSR disabled to
	 * avoid any rendering problems.
	 */
	if (INTEL_GEN(dev_priv) >= 12) {
		val = I915_READ(TRANS_PSR_IIR(dev_priv->psr.transcoder));
		val &= EDP_PSR_ERROR(0);
	} else {
		val = I915_READ(EDP_PSR_IIR);
		val &= EDP_PSR_ERROR(dev_priv->psr.transcoder);
	}
	if (val) {
		dev_priv->psr.sink_not_reliable = true;
		DRM_DEBUG_KMS("PSR interruption error set, not enabling PSR\n");
		return;
	}

	DRM_DEBUG_KMS("Enabling PSR%s\n",
		      dev_priv->psr.psr2_enabled ? "2" : "1");
	intel_psr_setup_vsc(intel_dp, crtc_state);
	intel_psr_enable_sink(intel_dp);
	intel_psr_enable_source(intel_dp, crtc_state);
	dev_priv->psr.enabled = true;

	intel_psr_activate(intel_dp);
}

/**
 * intel_psr_enable - Enable PSR
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 *
 * This function can only be called after the pipe is fully trained and enabled.
 */
void intel_psr_enable(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!crtc_state->has_psr)
		return;

	if (WARN_ON(!CAN_PSR(dev_priv)))
		return;

	WARN_ON(dev_priv->drrs.dp);

	mutex_lock(&dev_priv->psr.lock);

	if (!psr_global_enabled(dev_priv->psr.debug)) {
		DRM_DEBUG_KMS("PSR disabled by flag\n");
		goto unlock;
	}

	intel_psr_enable_locked(dev_priv, crtc_state);

unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

static void intel_psr_exit(struct drm_i915_private *dev_priv)
{
	u32 val;

	if (!dev_priv->psr.active) {
		if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder)) {
			val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
			WARN_ON(val & EDP_PSR2_ENABLE);
		}

		val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder));
		WARN_ON(val & EDP_PSR_ENABLE);

		return;
	}

	if (dev_priv->psr.psr2_enabled) {
		tgl_disallow_dc3co_on_psr2_exit(dev_priv);
		val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
		WARN_ON(!(val & EDP_PSR2_ENABLE));
		val &= ~EDP_PSR2_ENABLE;
		I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
	} else {
		val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder));
		WARN_ON(!(val & EDP_PSR_ENABLE));
		val &= ~EDP_PSR_ENABLE;
		I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), val);
	}
	dev_priv->psr.active = false;
}

static void intel_psr_disable_locked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	i915_reg_t psr_status;
	u32 psr_status_mask;

	lockdep_assert_held(&dev_priv->psr.lock);

	if (!dev_priv->psr.enabled)
		return;

	DRM_DEBUG_KMS("Disabling PSR%s\n",
		      dev_priv->psr.psr2_enabled ? "2" : "1");

	intel_psr_exit(dev_priv);

	if (dev_priv->psr.psr2_enabled) {
		psr_status = EDP_PSR2_STATUS(dev_priv->psr.transcoder);
		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
	} else {
		psr_status = EDP_PSR_STATUS(dev_priv->psr.transcoder);
		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
	}

	/* Wait till PSR is idle */
	if (intel_de_wait_for_clear(dev_priv, psr_status,
				    psr_status_mask, 2000))
		DRM_ERROR("Timed out waiting PSR idle state\n");

	/* Disable PSR on Sink */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);

	if (dev_priv->psr.psr2_enabled)
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);

	dev_priv->psr.enabled = false;
}

/**
 * intel_psr_disable - Disable PSR
 * @intel_dp: Intel DP
 * @old_crtc_state: old CRTC state
 *
 * This function needs to be called before disabling the pipe.
 */
void intel_psr_disable(struct intel_dp *intel_dp,
		       const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!old_crtc_state->has_psr)
		return;

	if (WARN_ON(!CAN_PSR(dev_priv)))
		return;

	mutex_lock(&dev_priv->psr.lock);

	intel_psr_disable_locked(intel_dp);

	mutex_unlock(&dev_priv->psr.lock);
	cancel_work_sync(&dev_priv->psr.work);
	cancel_delayed_work_sync(&dev_priv->psr.idle_work);
}

static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		/*
		 * Display WA #0884: skl+
		 * This documented WA for bxt can be safely applied
		 * broadly so we can force HW tracking to exit PSR
		 * instead of disabling and re-enabling.
		 * The workaround tells us to write 0 to CUR_SURFLIVE_A,
		 * but it makes more sense to write to the currently
		 * active pipe.
		 */
		I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0);
	else
		/*
		 * A write to CURSURFLIVE does not cause HW tracking to exit
		 * PSR on older gens, so do the manual exit instead.
		 */
		intel_psr_exit(dev_priv);
}

/**
 * intel_psr_update - Update PSR state
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 *
 * This function will update the PSR state, disabling, enabling or switching
 * PSR version when executing fastsets. For full modesets, intel_psr_disable()
 * and intel_psr_enable() should be called instead.
 */
void intel_psr_update(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct i915_psr *psr = &dev_priv->psr;
	bool enable, psr2_enable;

	if (!CAN_PSR(dev_priv) || READ_ONCE(psr->dp) != intel_dp)
		return;

	mutex_lock(&dev_priv->psr.lock);

	enable = crtc_state->has_psr && psr_global_enabled(psr->debug);
	psr2_enable = intel_psr2_enabled(dev_priv, crtc_state);

	if (enable == psr->enabled && psr2_enable == psr->psr2_enabled) {
		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
		if (crtc_state->crc_enabled && psr->enabled)
			psr_force_hw_tracking_exit(dev_priv);
		else if (INTEL_GEN(dev_priv) < 9 && psr->enabled) {
			/*
			 * Activate PSR again after a force exit when enabling
			 * CRC on older gens.
			 */
			if (!dev_priv->psr.active &&
			    !dev_priv->psr.busy_frontbuffer_bits)
				schedule_work(&dev_priv->psr.work);
		}

		goto unlock;
	}

	if (psr->enabled)
		intel_psr_disable_locked(intel_dp);

	if (enable)
		intel_psr_enable_locked(dev_priv, crtc_state);

unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_wait_for_idle - wait for PSR1 to idle
 * @new_crtc_state: new CRTC state
 * @out_value: PSR status in case of failure
 *
 * This function is expected to be called from pipe_update_start() where it is
 * not expected to race with PSR enable or disable.
 *
 * Returns: 0 on success or -ETIMEDOUT if PSR status does not idle.
 */
int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
			    u32 *out_value)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!dev_priv->psr.enabled || !new_crtc_state->has_psr)
		return 0;

	/* FIXME: Update this for PSR2 if we need to wait for idle */
	if (READ_ONCE(dev_priv->psr.psr2_enabled))
		return 0;

	/*
	 * From bspec: Panel Self Refresh (BDW+)
	 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
	 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
	 * defensive enough to cover everything.
	 */

	return __intel_wait_for_register(&dev_priv->uncore,
					 EDP_PSR_STATUS(dev_priv->psr.transcoder),
					 EDP_PSR_STATUS_STATE_MASK,
					 EDP_PSR_STATUS_STATE_IDLE, 2, 50,
					 out_value);
}

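/*
 * Wait (with psr.lock dropped) for the PSR status to become idle before
 * re-activating PSR. The lock is re-taken before returning and the result
 * already folds in a re-check of psr.enabled.
 */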
static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
{
	i915_reg_t reg;
	u32 mask;
	int err;

	if (!dev_priv->psr.enabled)
		return false;

	if (dev_priv->psr.psr2_enabled) {
		reg = EDP_PSR2_STATUS(dev_priv->psr.transcoder);
		mask = EDP_PSR2_STATUS_STATE_MASK;
	} else {
		reg = EDP_PSR_STATUS(dev_priv->psr.transcoder);
		mask = EDP_PSR_STATUS_STATE_MASK;
	}

	mutex_unlock(&dev_priv->psr.lock);

	err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
	if (err)
		DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");

	/* After the unlocked wait, verify that PSR is still wanted! */
	mutex_lock(&dev_priv->psr.lock);
	return err == 0 && dev_priv->psr.enabled;
}

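/*
 * Force a fastset on an active CRTC that has PSR in its state by marking its
 * mode as changed; used by intel_psr_debug_set() when the PSR debug mode
 * changes so the new mode is applied through a commit.
 */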
static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	struct intel_crtc *crtc;
	int err;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
	state->acquire_ctx = &ctx;

retry:
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			err = PTR_ERR(crtc_state);
			goto error;
		}

		if (crtc_state->hw.active && crtc_state->has_psr) {
			/* Mark mode as changed to trigger a pipe->update() */
			crtc_state->uapi.mode_changed = true;
			break;
		}
	}

	err = drm_atomic_commit(state);

error:
	if (err == -EDEADLK) {
		drm_atomic_state_clear(state);
		err = drm_modeset_backoff(&ctx);
		if (!err)
			goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	drm_atomic_state_put(state);

	return err;
}

int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
{
	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
	u32 old_mode;
	int ret;

	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
	    mode > I915_PSR_DEBUG_FORCE_PSR1) {
		DRM_DEBUG_KMS("Invalid debug mask %llx\n", val);
		return -EINVAL;
	}

	ret = mutex_lock_interruptible(&dev_priv->psr.lock);
	if (ret)
		return ret;

	old_mode = dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK;
	dev_priv->psr.debug = val;

	/*
	 * Do it right away if it's already enabled, otherwise it will be done
	 * when enabling the source.
	 */
	if (dev_priv->psr.enabled)
		psr_irq_control(dev_priv);

	mutex_unlock(&dev_priv->psr.lock);

	if (old_mode != mode)
		ret = intel_psr_fastset_force(dev_priv);

	return ret;
}

static void intel_psr_handle_irq(struct drm_i915_private *dev_priv)
{
	struct i915_psr *psr = &dev_priv->psr;

	intel_psr_disable_locked(psr->dp);
	psr->sink_not_reliable = true;
	/* let's make sure that the sink is awake */
	drm_dp_dpcd_writeb(&psr->dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}

static void intel_psr_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), psr.work);

	mutex_lock(&dev_priv->psr.lock);

	if (!dev_priv->psr.enabled)
		goto unlock;

	if (READ_ONCE(dev_priv->psr.irq_aux_error))
		intel_psr_handle_irq(dev_priv);

	/*
	 * We have to make sure PSR is ready for re-enable, otherwise it stays
	 * disabled until the next full enable/disable cycle. PSR might take
	 * some time to get fully disabled and be ready for re-enable.
	 */
	if (!__psr_wait_for_idle_locked(dev_priv))
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck. Since psr_flush first clears this and then reschedules we
	 * won't ever miss a flush when bailing out here.
	 */
	if (dev_priv->psr.busy_frontbuffer_bits || dev_priv->psr.active)
		goto unlock;

	intel_psr_activate(dev_priv->psr.dp);
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_invalidate - Invalidate PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the invalidate
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
			  unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	if (!CAN_PSR(dev_priv))
		return;

	if (origin == ORIGIN_FLIP)
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;

	if (frontbuffer_bits)
		intel_psr_exit(dev_priv);

	mutex_unlock(&dev_priv->psr.lock);
}

/*
 * When we completely rely on PSR2 S/W tracking in the future,
 * intel_psr_flush() will invalidate and flush the PSR for the ORIGIN_FLIP
 * event as well, so tgl_dc3co_flush() will need to be changed accordingly.
 */
static void
tgl_dc3co_flush(struct drm_i915_private *dev_priv,
		unsigned int frontbuffer_bits, enum fb_op_origin origin)
{
	u32 delay;

	mutex_lock(&dev_priv->psr.lock);

	if (!dev_priv->psr.dc3co_enabled)
		goto unlock;

	if (!dev_priv->psr.psr2_enabled || !dev_priv->psr.active)
		goto unlock;

	/*
	 * At every frontbuffer flush flip event modify the delay of the
	 * delayed work; when the delayed work finally runs it means the
	 * display has been idle for that long.
	 */
	if (!(frontbuffer_bits &
	      INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe)))
		goto unlock;

	tgl_psr2_enable_dc3co(dev_priv);
	/* DC5/DC6 required idle frames = 6 */
	delay = 6 * dev_priv->psr.dc3co_exit_delay;
	mod_delayed_work(system_wq, &dev_priv->psr.idle_work,
			 usecs_to_jiffies(delay));

unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_flush - Flush PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the flush
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering has completed and flushed out to memory. PSR
 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_flush(struct drm_i915_private *dev_priv,
		     unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	if (!CAN_PSR(dev_priv))
		return;

	if (origin == ORIGIN_FLIP) {
		tgl_dc3co_flush(dev_priv, frontbuffer_bits, origin);
		return;
	}

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* By definition flush = invalidate + flush */
	if (frontbuffer_bits)
		psr_force_hw_tracking_exit(dev_priv);

	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
		schedule_work(&dev_priv->psr.work);
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_init - Init basic PSR work and mutex.
 * @dev_priv: i915 device private
 *
 * This function is called only once at driver load to initialize basic
 * PSR state.
 */
void intel_psr_init(struct drm_i915_private *dev_priv)
{
	if (!HAS_PSR(dev_priv))
		return;

	if (!dev_priv->psr.sink_support)
		return;

	if (IS_HASWELL(dev_priv))
		/*
		 * HSW doesn't have the PSR registers in the same space as the
		 * transcoders, so set this to a value that, when subtracted
		 * from a register in transcoder space, results in the right
		 * offset for HSW.
		 */
		dev_priv->hsw_psr_mmio_adjust = _SRD_CTL_EDP - _HSW_EDP_PSR_BASE;

	if (i915_modparams.enable_psr == -1)
		if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
			i915_modparams.enable_psr = 0;

	/* Set link_standby x link_off defaults */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		/* HSW and BDW require workarounds that we don't implement. */
		dev_priv->psr.link_standby = false;
	else if (INTEL_GEN(dev_priv) < 12)
		/* For new platforms up to TGL let's respect the VBT again */
		dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;

	INIT_WORK(&dev_priv->psr.work, intel_psr_work);
	INIT_DELAYED_WORK(&dev_priv->psr.idle_work, tgl_dc5_idle_thread);
	mutex_init(&dev_priv->psr.lock);
}

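/*
 * Read the sink's PSR status and PSR error status over DPCD; the returned
 * status is masked down to the sink device state field.
 */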
static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
					   u8 *status, u8 *error_status)
{
	struct drm_dp_aux *aux = &intel_dp->aux;
	int ret;

	ret = drm_dp_dpcd_readb(aux, DP_PSR_STATUS, status);
	if (ret != 1)
		return ret;

	ret = drm_dp_dpcd_readb(aux, DP_PSR_ERROR_STATUS, error_status);
	if (ret != 1)
		return ret;

	*status = *status & DP_PSR_SINK_STATE_MASK;

	return 0;
}

static void psr_alpm_check(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct drm_dp_aux *aux = &intel_dp->aux;
	struct i915_psr *psr = &dev_priv->psr;
	u8 val;
	int r;

	if (!psr->psr2_enabled)
		return;

	r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
	if (r != 1) {
		DRM_ERROR("Error reading ALPM status\n");
		return;
	}

	if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
		DRM_DEBUG_KMS("ALPM lock timeout error, disabling PSR\n");

		/* Clearing error */
		drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
	}
}

static void psr_capability_changed_check(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct i915_psr *psr = &dev_priv->psr;
	u8 val;
	int r;

	r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
	if (r != 1) {
		DRM_ERROR("Error reading DP_PSR_ESI\n");
		return;
	}

	if (val & DP_PSR_CAPS_CHANGE) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
		DRM_DEBUG_KMS("Sink PSR capability changed, disabling PSR\n");

		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
	}
}

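/**
 * intel_psr_short_pulse - handle a short HPD pulse from the PSR sink
 * @intel_dp: Intel DP
 *
 * Reads the sink PSR status and error status registers and disables PSR,
 * marking the sink as not reliable, when the sink reports an error.
 */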
void intel_psr_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct i915_psr *psr = &dev_priv->psr;
	u8 status, error_status;
	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
			  DP_PSR_LINK_CRC_ERROR;

	if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
		return;

	mutex_lock(&psr->lock);

	if (!psr->enabled || psr->dp != intel_dp)
		goto exit;

	if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
		DRM_ERROR("Error reading PSR status or error status\n");
		goto exit;
	}

	if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
	}

	if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
		DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n");
	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
		DRM_DEBUG_KMS("PSR RFB storage error, disabling PSR\n");
	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
		DRM_DEBUG_KMS("PSR VSC SDP uncorrectable error, disabling PSR\n");
	if (error_status & DP_PSR_LINK_CRC_ERROR)
		DRM_DEBUG_KMS("PSR Link CRC error, disabling PSR\n");

	if (error_status & ~errors)
		DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n",
			  error_status & ~errors);
	/* clear status register */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);

	psr_alpm_check(intel_dp);
	psr_capability_changed_check(intel_dp);

exit:
	mutex_unlock(&psr->lock);
}

bool intel_psr_enabled(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	bool ret;

	if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
		return false;

	mutex_lock(&dev_priv->psr.lock);
	ret = (dev_priv->psr.dp == intel_dp && dev_priv->psr.enabled);
	mutex_unlock(&dev_priv->psr.lock);

	return ret;
}

void intel_psr_atomic_check(struct drm_connector *connector,
			    struct drm_connector_state *old_state,
			    struct drm_connector_state *new_state)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	struct intel_connector *intel_connector;
	struct intel_digital_port *dig_port;
	struct drm_crtc_state *crtc_state;

	if (!CAN_PSR(dev_priv) || !new_state->crtc ||
	    dev_priv->psr.initially_probed)
		return;

	intel_connector = to_intel_connector(connector);
	dig_port = enc_to_dig_port(intel_connector->encoder);
	if (dev_priv->psr.dp != &dig_port->dp)
		return;

	crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
						   new_state->crtc);
	crtc_state->mode_changed = true;
	dev_priv->psr.initially_probed = true;
}