2 * Copyright © 2014 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
25 * DOC: Frame Buffer Compression (FBC)
27 * FBC tries to save memory bandwidth (and so power consumption) by
28 * compressing the amount of memory used by the display. It is totally
29 * transparent to user space and completely handled in the kernel.
31 * The benefits of FBC are mostly visible with solid backgrounds and
32 * variation-less patterns. It comes from keeping the memory footprint small
33 * and having fewer memory pages opened and accessed for refreshing the display.
35 * i915 is responsible for reserving stolen memory for FBC and configuring its
36 * offset on proper registers. The hardware takes care of all
37 * compress/decompress. However there are many known cases where we have to
38 * forcibly disable it to allow proper screen updates.
41 #include <drm/drm_fourcc.h>
44 #include "i915_trace.h"
45 #include "i915_vgpu.h"
46 #include "intel_display_types.h"
47 #include "intel_fbc.h"
48 #include "intel_frontbuffer.h"
51 * For SKL+, the plane source size used by the hardware is based on the value we
52 * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
53 * we wrote to PIPESRC.
55 static void intel_fbc_get_plane_source_size(const struct intel_fbc_state_cache
*cache
,
56 int *width
, int *height
)
59 *width
= cache
->plane
.src_w
;
61 *height
= cache
->plane
.src_h
;
64 static int intel_fbc_calculate_cfb_size(struct drm_i915_private
*dev_priv
,
65 const struct intel_fbc_state_cache
*cache
)
69 intel_fbc_get_plane_source_size(cache
, NULL
, &lines
);
70 if (IS_GEN(dev_priv
, 7))
71 lines
= min(lines
, 2048);
72 else if (INTEL_GEN(dev_priv
) >= 8)
73 lines
= min(lines
, 2560);
75 /* Hardware needs the full buffer stride, not just the active area. */
76 return lines
* cache
->fb
.stride
;
79 static void i8xx_fbc_deactivate(struct drm_i915_private
*dev_priv
)
83 /* Disable compression */
84 fbc_ctl
= intel_de_read(dev_priv
, FBC_CONTROL
);
85 if ((fbc_ctl
& FBC_CTL_EN
) == 0)
88 fbc_ctl
&= ~FBC_CTL_EN
;
89 intel_de_write(dev_priv
, FBC_CONTROL
, fbc_ctl
);
91 /* Wait for compressing bit to clear */
92 if (intel_de_wait_for_clear(dev_priv
, FBC_STATUS
,
93 FBC_STAT_COMPRESSING
, 10)) {
94 drm_dbg_kms(&dev_priv
->drm
, "FBC idle timed out\n");
99 static void i8xx_fbc_activate(struct drm_i915_private
*dev_priv
)
101 struct intel_fbc_reg_params
*params
= &dev_priv
->fbc
.params
;
106 /* Note: fbc.threshold == 1 for i8xx */
107 cfb_pitch
= params
->cfb_size
/ FBC_LL_SIZE
;
108 if (params
->fb
.stride
< cfb_pitch
)
109 cfb_pitch
= params
->fb
.stride
;
111 /* FBC_CTL wants 32B or 64B units */
112 if (IS_GEN(dev_priv
, 2))
113 cfb_pitch
= (cfb_pitch
/ 32) - 1;
115 cfb_pitch
= (cfb_pitch
/ 64) - 1;
118 for (i
= 0; i
< (FBC_LL_SIZE
/ 32) + 1; i
++)
119 intel_de_write(dev_priv
, FBC_TAG(i
), 0);
121 if (IS_GEN(dev_priv
, 4)) {
125 fbc_ctl2
= FBC_CTL_FENCE_DBL
| FBC_CTL_IDLE_IMM
;
126 fbc_ctl2
|= FBC_CTL_PLANE(params
->crtc
.i9xx_plane
);
127 if (params
->fence_id
>= 0)
128 fbc_ctl2
|= FBC_CTL_CPU_FENCE
;
129 intel_de_write(dev_priv
, FBC_CONTROL2
, fbc_ctl2
);
130 intel_de_write(dev_priv
, FBC_FENCE_OFF
,
131 params
->fence_y_offset
);
135 fbc_ctl
= FBC_CTL_INTERVAL(params
->interval
);
136 fbc_ctl
|= FBC_CTL_EN
| FBC_CTL_PERIODIC
;
137 if (IS_I945GM(dev_priv
))
138 fbc_ctl
|= FBC_CTL_C3_IDLE
; /* 945 needs special SR handling */
139 fbc_ctl
|= FBC_CTL_STRIDE(cfb_pitch
& 0xff);
140 if (params
->fence_id
>= 0)
141 fbc_ctl
|= FBC_CTL_FENCENO(params
->fence_id
);
142 intel_de_write(dev_priv
, FBC_CONTROL
, fbc_ctl
);
145 static bool i8xx_fbc_is_active(struct drm_i915_private
*dev_priv
)
147 return intel_de_read(dev_priv
, FBC_CONTROL
) & FBC_CTL_EN
;
150 static void g4x_fbc_activate(struct drm_i915_private
*dev_priv
)
152 struct intel_fbc_reg_params
*params
= &dev_priv
->fbc
.params
;
155 dpfc_ctl
= DPFC_CTL_PLANE(params
->crtc
.i9xx_plane
) | DPFC_SR_EN
;
156 if (params
->fb
.format
->cpp
[0] == 2)
157 dpfc_ctl
|= DPFC_CTL_LIMIT_2X
;
159 dpfc_ctl
|= DPFC_CTL_LIMIT_1X
;
161 if (params
->fence_id
>= 0) {
162 dpfc_ctl
|= DPFC_CTL_FENCE_EN
| params
->fence_id
;
163 intel_de_write(dev_priv
, DPFC_FENCE_YOFF
,
164 params
->fence_y_offset
);
166 intel_de_write(dev_priv
, DPFC_FENCE_YOFF
, 0);
170 intel_de_write(dev_priv
, DPFC_CONTROL
, dpfc_ctl
| DPFC_CTL_EN
);
173 static void g4x_fbc_deactivate(struct drm_i915_private
*dev_priv
)
177 /* Disable compression */
178 dpfc_ctl
= intel_de_read(dev_priv
, DPFC_CONTROL
);
179 if (dpfc_ctl
& DPFC_CTL_EN
) {
180 dpfc_ctl
&= ~DPFC_CTL_EN
;
181 intel_de_write(dev_priv
, DPFC_CONTROL
, dpfc_ctl
);
185 static bool g4x_fbc_is_active(struct drm_i915_private
*dev_priv
)
187 return intel_de_read(dev_priv
, DPFC_CONTROL
) & DPFC_CTL_EN
;
190 static void i8xx_fbc_recompress(struct drm_i915_private
*dev_priv
)
192 struct intel_fbc_reg_params
*params
= &dev_priv
->fbc
.params
;
193 enum i9xx_plane_id i9xx_plane
= params
->crtc
.i9xx_plane
;
195 spin_lock_irq(&dev_priv
->uncore
.lock
);
196 intel_de_write_fw(dev_priv
, DSPADDR(i9xx_plane
),
197 intel_de_read_fw(dev_priv
, DSPADDR(i9xx_plane
)));
198 spin_unlock_irq(&dev_priv
->uncore
.lock
);
201 static void i965_fbc_recompress(struct drm_i915_private
*dev_priv
)
203 struct intel_fbc_reg_params
*params
= &dev_priv
->fbc
.params
;
204 enum i9xx_plane_id i9xx_plane
= params
->crtc
.i9xx_plane
;
206 spin_lock_irq(&dev_priv
->uncore
.lock
);
207 intel_de_write_fw(dev_priv
, DSPSURF(i9xx_plane
),
208 intel_de_read_fw(dev_priv
, DSPSURF(i9xx_plane
)));
209 spin_unlock_irq(&dev_priv
->uncore
.lock
);
212 /* This function forces a CFB recompression through the nuke operation. */
213 static void snb_fbc_recompress(struct drm_i915_private
*dev_priv
)
215 struct intel_fbc
*fbc
= &dev_priv
->fbc
;
217 trace_intel_fbc_nuke(fbc
->crtc
);
219 intel_de_write(dev_priv
, MSG_FBC_REND_STATE
, FBC_REND_NUKE
);
220 intel_de_posting_read(dev_priv
, MSG_FBC_REND_STATE
);
/* Dispatch to the per-generation CFB recompression routine. */
static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 6)
		snb_fbc_recompress(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 4)
		i965_fbc_recompress(dev_priv);
	else
		i8xx_fbc_recompress(dev_priv);
}
233 static void ilk_fbc_activate(struct drm_i915_private
*dev_priv
)
235 struct intel_fbc_reg_params
*params
= &dev_priv
->fbc
.params
;
237 int threshold
= dev_priv
->fbc
.threshold
;
239 dpfc_ctl
= DPFC_CTL_PLANE(params
->crtc
.i9xx_plane
);
240 if (params
->fb
.format
->cpp
[0] == 2)
246 dpfc_ctl
|= DPFC_CTL_LIMIT_4X
;
249 dpfc_ctl
|= DPFC_CTL_LIMIT_2X
;
252 dpfc_ctl
|= DPFC_CTL_LIMIT_1X
;
256 if (params
->fence_id
>= 0) {
257 dpfc_ctl
|= DPFC_CTL_FENCE_EN
;
258 if (IS_GEN(dev_priv
, 5))
259 dpfc_ctl
|= params
->fence_id
;
260 if (IS_GEN(dev_priv
, 6)) {
261 intel_de_write(dev_priv
, SNB_DPFC_CTL_SA
,
262 SNB_CPU_FENCE_ENABLE
| params
->fence_id
);
263 intel_de_write(dev_priv
, DPFC_CPU_FENCE_OFFSET
,
264 params
->fence_y_offset
);
267 if (IS_GEN(dev_priv
, 6)) {
268 intel_de_write(dev_priv
, SNB_DPFC_CTL_SA
, 0);
269 intel_de_write(dev_priv
, DPFC_CPU_FENCE_OFFSET
, 0);
273 intel_de_write(dev_priv
, ILK_DPFC_FENCE_YOFF
,
274 params
->fence_y_offset
);
276 intel_de_write(dev_priv
, ILK_DPFC_CONTROL
, dpfc_ctl
| DPFC_CTL_EN
);
278 intel_fbc_recompress(dev_priv
);
281 static void ilk_fbc_deactivate(struct drm_i915_private
*dev_priv
)
285 /* Disable compression */
286 dpfc_ctl
= intel_de_read(dev_priv
, ILK_DPFC_CONTROL
);
287 if (dpfc_ctl
& DPFC_CTL_EN
) {
288 dpfc_ctl
&= ~DPFC_CTL_EN
;
289 intel_de_write(dev_priv
, ILK_DPFC_CONTROL
, dpfc_ctl
);
293 static bool ilk_fbc_is_active(struct drm_i915_private
*dev_priv
)
295 return intel_de_read(dev_priv
, ILK_DPFC_CONTROL
) & DPFC_CTL_EN
;
298 static void gen7_fbc_activate(struct drm_i915_private
*dev_priv
)
300 struct intel_fbc_reg_params
*params
= &dev_priv
->fbc
.params
;
302 int threshold
= dev_priv
->fbc
.threshold
;
304 /* Display WA #0529: skl, kbl, bxt. */
305 if (IS_GEN9_BC(dev_priv
) || IS_BROXTON(dev_priv
)) {
306 u32 val
= intel_de_read(dev_priv
, CHICKEN_MISC_4
);
308 val
&= ~(FBC_STRIDE_OVERRIDE
| FBC_STRIDE_MASK
);
310 if (params
->gen9_wa_cfb_stride
)
311 val
|= FBC_STRIDE_OVERRIDE
| params
->gen9_wa_cfb_stride
;
313 intel_de_write(dev_priv
, CHICKEN_MISC_4
, val
);
317 if (IS_IVYBRIDGE(dev_priv
))
318 dpfc_ctl
|= IVB_DPFC_CTL_PLANE(params
->crtc
.i9xx_plane
);
320 if (params
->fb
.format
->cpp
[0] == 2)
326 dpfc_ctl
|= DPFC_CTL_LIMIT_4X
;
329 dpfc_ctl
|= DPFC_CTL_LIMIT_2X
;
332 dpfc_ctl
|= DPFC_CTL_LIMIT_1X
;
336 if (params
->fence_id
>= 0) {
337 dpfc_ctl
|= IVB_DPFC_CTL_FENCE_EN
;
338 intel_de_write(dev_priv
, SNB_DPFC_CTL_SA
,
339 SNB_CPU_FENCE_ENABLE
| params
->fence_id
);
340 intel_de_write(dev_priv
, DPFC_CPU_FENCE_OFFSET
,
341 params
->fence_y_offset
);
342 } else if (dev_priv
->ggtt
.num_fences
) {
343 intel_de_write(dev_priv
, SNB_DPFC_CTL_SA
, 0);
344 intel_de_write(dev_priv
, DPFC_CPU_FENCE_OFFSET
, 0);
347 if (dev_priv
->fbc
.false_color
)
348 dpfc_ctl
|= FBC_CTL_FALSE_COLOR
;
350 intel_de_write(dev_priv
, ILK_DPFC_CONTROL
, dpfc_ctl
| DPFC_CTL_EN
);
352 intel_fbc_recompress(dev_priv
);
355 static bool intel_fbc_hw_is_active(struct drm_i915_private
*dev_priv
)
357 if (INTEL_GEN(dev_priv
) >= 5)
358 return ilk_fbc_is_active(dev_priv
);
359 else if (IS_GM45(dev_priv
))
360 return g4x_fbc_is_active(dev_priv
);
362 return i8xx_fbc_is_active(dev_priv
);
365 static void intel_fbc_hw_activate(struct drm_i915_private
*dev_priv
)
367 struct intel_fbc
*fbc
= &dev_priv
->fbc
;
369 trace_intel_fbc_activate(fbc
->crtc
);
372 fbc
->activated
= true;
374 if (INTEL_GEN(dev_priv
) >= 7)
375 gen7_fbc_activate(dev_priv
);
376 else if (INTEL_GEN(dev_priv
) >= 5)
377 ilk_fbc_activate(dev_priv
);
378 else if (IS_GM45(dev_priv
))
379 g4x_fbc_activate(dev_priv
);
381 i8xx_fbc_activate(dev_priv
);
384 static void intel_fbc_hw_deactivate(struct drm_i915_private
*dev_priv
)
386 struct intel_fbc
*fbc
= &dev_priv
->fbc
;
388 trace_intel_fbc_deactivate(fbc
->crtc
);
392 if (INTEL_GEN(dev_priv
) >= 5)
393 ilk_fbc_deactivate(dev_priv
);
394 else if (IS_GM45(dev_priv
))
395 g4x_fbc_deactivate(dev_priv
);
397 i8xx_fbc_deactivate(dev_priv
);
401 * intel_fbc_is_active - Is FBC active?
402 * @dev_priv: i915 device instance
404 * This function is used to verify the current state of FBC.
406 * FIXME: This should be tracked in the plane config eventually
407 * instead of queried at runtime for most callers.
409 bool intel_fbc_is_active(struct drm_i915_private
*dev_priv
)
411 return dev_priv
->fbc
.active
;
414 static void intel_fbc_deactivate(struct drm_i915_private
*dev_priv
,
417 struct intel_fbc
*fbc
= &dev_priv
->fbc
;
419 drm_WARN_ON(&dev_priv
->drm
, !mutex_is_locked(&fbc
->lock
));
422 intel_fbc_hw_deactivate(dev_priv
);
424 fbc
->no_fbc_reason
= reason
;
427 static u64
intel_fbc_cfb_base_max(struct drm_i915_private
*i915
)
429 if (INTEL_GEN(i915
) >= 5 || IS_G4X(i915
))
435 static int find_compression_threshold(struct drm_i915_private
*dev_priv
,
436 struct drm_mm_node
*node
,
440 int compression_threshold
= 1;
444 /* The FBC hardware for BDW/SKL doesn't have access to the stolen
445 * reserved range size, so it always assumes the maximum (8mb) is used.
446 * If we enable FBC using a CFB on that memory range we'll get FIFO
447 * underruns, even if that range is not reserved by the BIOS. */
448 if (IS_BROADWELL(dev_priv
) || IS_GEN9_BC(dev_priv
))
449 end
= resource_size(&dev_priv
->dsm
) - 8 * 1024 * 1024;
453 end
= min(end
, intel_fbc_cfb_base_max(dev_priv
));
455 /* HACK: This code depends on what we will do in *_enable_fbc. If that
456 * code changes, this code needs to change as well.
458 * The enable_fbc code will attempt to use one of our 2 compression
459 * thresholds, therefore, in that case, we only have 1 resort.
462 /* Try to over-allocate to reduce reallocations and fragmentation. */
463 ret
= i915_gem_stolen_insert_node_in_range(dev_priv
, node
, size
<<= 1,
466 return compression_threshold
;
469 /* HW's ability to limit the CFB is 1:4 */
470 if (compression_threshold
> 4 ||
471 (fb_cpp
== 2 && compression_threshold
== 2))
474 ret
= i915_gem_stolen_insert_node_in_range(dev_priv
, node
, size
>>= 1,
476 if (ret
&& INTEL_GEN(dev_priv
) <= 4) {
479 compression_threshold
<<= 1;
482 return compression_threshold
;
486 static int intel_fbc_alloc_cfb(struct drm_i915_private
*dev_priv
,
487 unsigned int size
, unsigned int fb_cpp
)
489 struct intel_fbc
*fbc
= &dev_priv
->fbc
;
490 struct drm_mm_node
*compressed_llb
;
493 drm_WARN_ON(&dev_priv
->drm
,
494 drm_mm_node_allocated(&fbc
->compressed_fb
));
496 ret
= find_compression_threshold(dev_priv
, &fbc
->compressed_fb
,
501 drm_info_once(&dev_priv
->drm
,
502 "Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
505 fbc
->threshold
= ret
;
507 if (INTEL_GEN(dev_priv
) >= 5)
508 intel_de_write(dev_priv
, ILK_DPFC_CB_BASE
,
509 fbc
->compressed_fb
.start
);
510 else if (IS_GM45(dev_priv
)) {
511 intel_de_write(dev_priv
, DPFC_CB_BASE
,
512 fbc
->compressed_fb
.start
);
514 compressed_llb
= kzalloc(sizeof(*compressed_llb
), GFP_KERNEL
);
518 ret
= i915_gem_stolen_insert_node(dev_priv
, compressed_llb
,
523 fbc
->compressed_llb
= compressed_llb
;
525 GEM_BUG_ON(range_overflows_end_t(u64
, dev_priv
->dsm
.start
,
526 fbc
->compressed_fb
.start
,
528 GEM_BUG_ON(range_overflows_end_t(u64
, dev_priv
->dsm
.start
,
529 fbc
->compressed_llb
->start
,
531 intel_de_write(dev_priv
, FBC_CFB_BASE
,
532 dev_priv
->dsm
.start
+ fbc
->compressed_fb
.start
);
533 intel_de_write(dev_priv
, FBC_LL_BASE
,
534 dev_priv
->dsm
.start
+ compressed_llb
->start
);
537 drm_dbg_kms(&dev_priv
->drm
,
538 "reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
539 fbc
->compressed_fb
.size
, fbc
->threshold
);
544 kfree(compressed_llb
);
545 i915_gem_stolen_remove_node(dev_priv
, &fbc
->compressed_fb
);
547 if (drm_mm_initialized(&dev_priv
->mm
.stolen
))
548 drm_info_once(&dev_priv
->drm
, "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size
);
552 static void __intel_fbc_cleanup_cfb(struct drm_i915_private
*dev_priv
)
554 struct intel_fbc
*fbc
= &dev_priv
->fbc
;
556 if (WARN_ON(intel_fbc_hw_is_active(dev_priv
)))
559 if (!drm_mm_node_allocated(&fbc
->compressed_fb
))
562 if (fbc
->compressed_llb
) {
563 i915_gem_stolen_remove_node(dev_priv
, fbc
->compressed_llb
);
564 kfree(fbc
->compressed_llb
);
567 i915_gem_stolen_remove_node(dev_priv
, &fbc
->compressed_fb
);
570 void intel_fbc_cleanup_cfb(struct drm_i915_private
*dev_priv
)
572 struct intel_fbc
*fbc
= &dev_priv
->fbc
;
574 if (!HAS_FBC(dev_priv
))
577 mutex_lock(&fbc
->lock
);
578 __intel_fbc_cleanup_cfb(dev_priv
);
579 mutex_unlock(&fbc
->lock
);
582 static bool stride_is_valid(struct drm_i915_private
*dev_priv
,
583 u64 modifier
, unsigned int stride
)
585 /* This should have been caught earlier. */
586 if (drm_WARN_ON_ONCE(&dev_priv
->drm
, (stride
& (64 - 1)) != 0))
589 /* Below are the additional FBC restrictions. */
593 if (IS_GEN(dev_priv
, 2) || IS_GEN(dev_priv
, 3))
594 return stride
== 4096 || stride
== 8192;
596 if (IS_GEN(dev_priv
, 4) && !IS_G4X(dev_priv
) && stride
< 2048)
599 /* Display WA #1105: skl,bxt,kbl,cfl,glk */
600 if (IS_GEN(dev_priv
, 9) &&
601 modifier
== DRM_FORMAT_MOD_LINEAR
&& stride
& 511)
610 static bool pixel_format_is_valid(struct drm_i915_private
*dev_priv
,
613 switch (pixel_format
) {
614 case DRM_FORMAT_XRGB8888
:
615 case DRM_FORMAT_XBGR8888
:
617 case DRM_FORMAT_XRGB1555
:
618 case DRM_FORMAT_RGB565
:
619 /* 16bpp not supported on gen2 */
620 if (IS_GEN(dev_priv
, 2))
622 /* WaFbcOnly1to1Ratio:ctg */
623 if (IS_G4X(dev_priv
))
631 static bool rotation_is_valid(struct drm_i915_private
*dev_priv
,
632 u32 pixel_format
, unsigned int rotation
)
634 if (INTEL_GEN(dev_priv
) >= 9 && pixel_format
== DRM_FORMAT_RGB565
&&
635 drm_rotation_90_or_270(rotation
))
637 else if (INTEL_GEN(dev_priv
) <= 4 && !IS_G4X(dev_priv
) &&
638 rotation
!= DRM_MODE_ROTATE_0
)
645 * For some reason, the hardware tracking starts looking at whatever we
646 * programmed as the display plane base address register. It does not look at
647 * the X and Y offset registers. That's why we include the src x/y offsets
648 * instead of just looking at the plane size.
650 static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc
*crtc
)
652 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
653 struct intel_fbc
*fbc
= &dev_priv
->fbc
;
654 unsigned int effective_w
, effective_h
, max_w
, max_h
;
656 if (INTEL_GEN(dev_priv
) >= 10 || IS_GEMINILAKE(dev_priv
)) {
659 } else if (INTEL_GEN(dev_priv
) >= 8 || IS_HASWELL(dev_priv
)) {
662 } else if (IS_G4X(dev_priv
) || INTEL_GEN(dev_priv
) >= 5) {
670 intel_fbc_get_plane_source_size(&fbc
->state_cache
, &effective_w
,
672 effective_w
+= fbc
->state_cache
.plane
.adjusted_x
;
673 effective_h
+= fbc
->state_cache
.plane
.adjusted_y
;
675 return effective_w
<= max_w
&& effective_h
<= max_h
;
678 static bool tiling_is_valid(struct drm_i915_private
*dev_priv
,
682 case DRM_FORMAT_MOD_LINEAR
:
683 if (INTEL_GEN(dev_priv
) >= 9)
686 case I915_FORMAT_MOD_X_TILED
:
687 case I915_FORMAT_MOD_Y_TILED
:
694 static void intel_fbc_update_state_cache(struct intel_crtc
*crtc
,
695 const struct intel_crtc_state
*crtc_state
,
696 const struct intel_plane_state
*plane_state
)
698 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
699 struct intel_fbc
*fbc
= &dev_priv
->fbc
;
700 struct intel_fbc_state_cache
*cache
= &fbc
->state_cache
;
701 struct drm_framebuffer
*fb
= plane_state
->hw
.fb
;
703 cache
->plane
.visible
= plane_state
->uapi
.visible
;
704 if (!cache
->plane
.visible
)
707 cache
->crtc
.mode_flags
= crtc_state
->hw
.adjusted_mode
.flags
;
708 if (IS_HASWELL(dev_priv
) || IS_BROADWELL(dev_priv
))
709 cache
->crtc
.hsw_bdw_pixel_rate
= crtc_state
->pixel_rate
;
711 cache
->plane
.rotation
= plane_state
->hw
.rotation
;
713 * Src coordinates are already rotated by 270 degrees for
714 * the 90/270 degree plane rotation cases (to match the
715 * GTT mapping), hence no need to account for rotation here.
717 cache
->plane
.src_w
= drm_rect_width(&plane_state
->uapi
.src
) >> 16;
718 cache
->plane
.src_h
= drm_rect_height(&plane_state
->uapi
.src
) >> 16;
719 cache
->plane
.adjusted_x
= plane_state
->color_plane
[0].x
;
720 cache
->plane
.adjusted_y
= plane_state
->color_plane
[0].y
;
722 cache
->plane
.pixel_blend_mode
= plane_state
->hw
.pixel_blend_mode
;
724 cache
->fb
.format
= fb
->format
;
725 cache
->fb
.modifier
= fb
->modifier
;
727 /* FIXME is this correct? */
728 cache
->fb
.stride
= plane_state
->color_plane
[0].stride
;
729 if (drm_rotation_90_or_270(plane_state
->hw
.rotation
))
730 cache
->fb
.stride
*= fb
->format
->cpp
[0];
732 /* FBC1 compression interval: arbitrary choice of 1 second */
733 cache
->interval
= drm_mode_vrefresh(&crtc_state
->hw
.adjusted_mode
);
735 cache
->fence_y_offset
= intel_plane_fence_y_offset(plane_state
);
737 drm_WARN_ON(&dev_priv
->drm
, plane_state
->flags
& PLANE_HAS_FENCE
&&
738 !plane_state
->vma
->fence
);
740 if (plane_state
->flags
& PLANE_HAS_FENCE
&&
741 plane_state
->vma
->fence
)
742 cache
->fence_id
= plane_state
->vma
->fence
->id
;
744 cache
->fence_id
= -1;
747 static bool intel_fbc_cfb_size_changed(struct drm_i915_private
*dev_priv
)
749 struct intel_fbc
*fbc
= &dev_priv
->fbc
;
751 return intel_fbc_calculate_cfb_size(dev_priv
, &fbc
->state_cache
) >
752 fbc
->compressed_fb
.size
* fbc
->threshold
;
755 static u16
intel_fbc_gen9_wa_cfb_stride(struct drm_i915_private
*dev_priv
)
757 struct intel_fbc
*fbc
= &dev_priv
->fbc
;
758 struct intel_fbc_state_cache
*cache
= &fbc
->state_cache
;
760 if ((IS_GEN9_BC(dev_priv
) || IS_BROXTON(dev_priv
)) &&
761 cache
->fb
.modifier
!= I915_FORMAT_MOD_X_TILED
)
762 return DIV_ROUND_UP(cache
->plane
.src_w
, 32 * fbc
->threshold
) * 8;
767 static bool intel_fbc_gen9_wa_cfb_stride_changed(struct drm_i915_private
*dev_priv
)
769 struct intel_fbc
*fbc
= &dev_priv
->fbc
;
771 return fbc
->params
.gen9_wa_cfb_stride
!= intel_fbc_gen9_wa_cfb_stride(dev_priv
);
774 static bool intel_fbc_can_enable(struct drm_i915_private
*dev_priv
)
776 struct intel_fbc
*fbc
= &dev_priv
->fbc
;
778 if (intel_vgpu_active(dev_priv
)) {
779 fbc
->no_fbc_reason
= "VGPU is active";
783 if (!dev_priv
->params
.enable_fbc
) {
784 fbc
->no_fbc_reason
= "disabled per module param or by default";
788 if (fbc
->underrun_detected
) {
789 fbc
->no_fbc_reason
= "underrun detected";
796 static bool intel_fbc_can_activate(struct intel_crtc
*crtc
)
798 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
799 struct intel_fbc
*fbc
= &dev_priv
->fbc
;
800 struct intel_fbc_state_cache
*cache
= &fbc
->state_cache
;
802 if (!intel_fbc_can_enable(dev_priv
))
805 if (!cache
->plane
.visible
) {
806 fbc
->no_fbc_reason
= "primary plane not visible";
810 /* We don't need to use a state cache here since this information is
811 * global for all CRTC.
813 if (fbc
->underrun_detected
) {
814 fbc
->no_fbc_reason
= "underrun detected";
818 if (cache
->crtc
.mode_flags
& DRM_MODE_FLAG_INTERLACE
) {
819 fbc
->no_fbc_reason
= "incompatible mode";
823 if (!intel_fbc_hw_tracking_covers_screen(crtc
)) {
824 fbc
->no_fbc_reason
= "mode too large for compression";
828 /* The use of a CPU fence is one of two ways to detect writes by the
829 * CPU to the scanout and trigger updates to the FBC.
831 * The other method is by software tracking (see
832 * intel_fbc_invalidate/flush()), it will manually notify FBC and nuke
833 * the current compressed buffer and recompress it.
835 * Note that is possible for a tiled surface to be unmappable (and
836 * so have no fence associated with it) due to aperture constraints
837 * at the time of pinning.
839 * FIXME with 90/270 degree rotation we should use the fence on
840 * the normal GTT view (the rotated view doesn't even have a
841 * fence). Would need changes to the FBC fence Y offset as well.
842 * For now this will effectively disable FBC with 90/270 degree
845 if (INTEL_GEN(dev_priv
) < 9 && cache
->fence_id
< 0) {
846 fbc
->no_fbc_reason
= "framebuffer not tiled or fenced";
850 if (!pixel_format_is_valid(dev_priv
, cache
->fb
.format
->format
)) {
851 fbc
->no_fbc_reason
= "pixel format is invalid";
855 if (!rotation_is_valid(dev_priv
, cache
->fb
.format
->format
,
856 cache
->plane
.rotation
)) {
857 fbc
->no_fbc_reason
= "rotation unsupported";
861 if (!tiling_is_valid(dev_priv
, cache
->fb
.modifier
)) {
862 fbc
->no_fbc_reason
= "tiling unsupported";
866 if (!stride_is_valid(dev_priv
, cache
->fb
.modifier
, cache
->fb
.stride
)) {
867 fbc
->no_fbc_reason
= "framebuffer stride not supported";
871 if (cache
->plane
.pixel_blend_mode
!= DRM_MODE_BLEND_PIXEL_NONE
&&
872 cache
->fb
.format
->has_alpha
) {
873 fbc
->no_fbc_reason
= "per-pixel alpha blending is incompatible with FBC";
877 /* WaFbcExceedCdClockThreshold:hsw,bdw */
878 if ((IS_HASWELL(dev_priv
) || IS_BROADWELL(dev_priv
)) &&
879 cache
->crtc
.hsw_bdw_pixel_rate
>= dev_priv
->cdclk
.hw
.cdclk
* 95 / 100) {
880 fbc
->no_fbc_reason
= "pixel rate is too big";
884 /* It is possible for the required CFB size change without a
885 * crtc->disable + crtc->enable since it is possible to change the
886 * stride without triggering a full modeset. Since we try to
887 * over-allocate the CFB, there's a chance we may keep FBC enabled even
888 * if this happens, but if we exceed the current CFB size we'll have to
889 * disable FBC. Notice that it would be possible to disable FBC, wait
890 * for a frame, free the stolen node, then try to reenable FBC in case
891 * we didn't get any invalidate/deactivate calls, but this would require
892 * a lot of tracking just for a specific case. If we conclude it's an
893 * important case, we can implement it later. */
894 if (intel_fbc_cfb_size_changed(dev_priv
)) {
895 fbc
->no_fbc_reason
= "CFB requirements changed";
900 * Work around a problem on GEN9+ HW, where enabling FBC on a plane
901 * having a Y offset that isn't divisible by 4 causes FIFO underrun
902 * and screen flicker.
904 if (INTEL_GEN(dev_priv
) >= 9 &&
905 (fbc
->state_cache
.plane
.adjusted_y
& 3)) {
906 fbc
->no_fbc_reason
= "plane Y offset is misaligned";
910 /* Wa_22010751166: icl, ehl, tgl, dg1, rkl */
911 if (INTEL_GEN(dev_priv
) >= 11 &&
912 (cache
->plane
.src_h
+ cache
->plane
.adjusted_y
) % 4) {
913 fbc
->no_fbc_reason
= "plane height + offset is non-modulo of 4";
920 static void intel_fbc_get_reg_params(struct intel_crtc
*crtc
,
921 struct intel_fbc_reg_params
*params
)
923 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
924 struct intel_fbc
*fbc
= &dev_priv
->fbc
;
925 struct intel_fbc_state_cache
*cache
= &fbc
->state_cache
;
927 /* Since all our fields are integer types, use memset here so the
928 * comparison function can rely on memcmp because the padding will be
930 memset(params
, 0, sizeof(*params
));
932 params
->fence_id
= cache
->fence_id
;
933 params
->fence_y_offset
= cache
->fence_y_offset
;
935 params
->interval
= cache
->interval
;
937 params
->crtc
.pipe
= crtc
->pipe
;
938 params
->crtc
.i9xx_plane
= to_intel_plane(crtc
->base
.primary
)->i9xx_plane
;
940 params
->fb
.format
= cache
->fb
.format
;
941 params
->fb
.modifier
= cache
->fb
.modifier
;
942 params
->fb
.stride
= cache
->fb
.stride
;
944 params
->cfb_size
= intel_fbc_calculate_cfb_size(dev_priv
, cache
);
946 params
->gen9_wa_cfb_stride
= cache
->gen9_wa_cfb_stride
;
948 params
->plane_visible
= cache
->plane
.visible
;
951 static bool intel_fbc_can_flip_nuke(const struct intel_crtc_state
*crtc_state
)
953 struct intel_crtc
*crtc
= to_intel_crtc(crtc_state
->uapi
.crtc
);
954 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
955 const struct intel_fbc
*fbc
= &dev_priv
->fbc
;
956 const struct intel_fbc_state_cache
*cache
= &fbc
->state_cache
;
957 const struct intel_fbc_reg_params
*params
= &fbc
->params
;
959 if (drm_atomic_crtc_needs_modeset(&crtc_state
->uapi
))
962 if (!params
->plane_visible
)
965 if (!intel_fbc_can_activate(crtc
))
968 if (params
->fb
.format
!= cache
->fb
.format
)
971 if (params
->fb
.modifier
!= cache
->fb
.modifier
)
974 if (params
->fb
.stride
!= cache
->fb
.stride
)
977 if (params
->cfb_size
!= intel_fbc_calculate_cfb_size(dev_priv
, cache
))
980 if (params
->gen9_wa_cfb_stride
!= cache
->gen9_wa_cfb_stride
)
986 bool intel_fbc_pre_update(struct intel_atomic_state
*state
,
987 struct intel_crtc
*crtc
)
989 struct intel_plane
*plane
= to_intel_plane(crtc
->base
.primary
);
990 const struct intel_crtc_state
*crtc_state
=
991 intel_atomic_get_new_crtc_state(state
, crtc
);
992 const struct intel_plane_state
*plane_state
=
993 intel_atomic_get_new_plane_state(state
, plane
);
994 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
995 struct intel_fbc
*fbc
= &dev_priv
->fbc
;
996 const char *reason
= "update pending";
997 bool need_vblank_wait
= false;
999 if (!plane
->has_fbc
|| !plane_state
)
1000 return need_vblank_wait
;
1002 mutex_lock(&fbc
->lock
);
1004 if (fbc
->crtc
!= crtc
)
1007 intel_fbc_update_state_cache(crtc
, crtc_state
, plane_state
);
1008 fbc
->flip_pending
= true;
1010 if (!intel_fbc_can_flip_nuke(crtc_state
)) {
1011 intel_fbc_deactivate(dev_priv
, reason
);
1014 * Display WA #1198: glk+
1015 * Need an extra vblank wait between FBC disable and most plane
1016 * updates. Bspec says this is only needed for plane disable, but
1017 * that is not true. Touching most plane registers will cause the
1018 * corruption to appear. Also SKL/derivatives do not seem to be
1021 * TODO: could optimize this a bit by sampling the frame
1022 * counter when we disable FBC (if it was already done earlier)
1023 * and skipping the extra vblank wait before the plane update
1024 * if at least one frame has already passed.
1026 if (fbc
->activated
&&
1027 (INTEL_GEN(dev_priv
) >= 10 || IS_GEMINILAKE(dev_priv
)))
1028 need_vblank_wait
= true;
1029 fbc
->activated
= false;
1032 mutex_unlock(&fbc
->lock
);
1034 return need_vblank_wait
;
1038 * __intel_fbc_disable - disable FBC
1039 * @dev_priv: i915 device instance
1041 * This is the low level function that actually disables FBC. Callers should
1042 * grab the FBC lock.
1044 static void __intel_fbc_disable(struct drm_i915_private
*dev_priv
)
1046 struct intel_fbc
*fbc
= &dev_priv
->fbc
;
1047 struct intel_crtc
*crtc
= fbc
->crtc
;
1049 drm_WARN_ON(&dev_priv
->drm
, !mutex_is_locked(&fbc
->lock
));
1050 drm_WARN_ON(&dev_priv
->drm
, !fbc
->crtc
);
1051 drm_WARN_ON(&dev_priv
->drm
, fbc
->active
);
1053 drm_dbg_kms(&dev_priv
->drm
, "Disabling FBC on pipe %c\n",
1054 pipe_name(crtc
->pipe
));
1056 __intel_fbc_cleanup_cfb(dev_priv
);
1061 static void __intel_fbc_post_update(struct intel_crtc
*crtc
)
1063 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
1064 struct intel_fbc
*fbc
= &dev_priv
->fbc
;
1066 drm_WARN_ON(&dev_priv
->drm
, !mutex_is_locked(&fbc
->lock
));
1068 if (fbc
->crtc
!= crtc
)
1071 fbc
->flip_pending
= false;
1073 if (!dev_priv
->params
.enable_fbc
) {
1074 intel_fbc_deactivate(dev_priv
, "disabled at runtime per module param");
1075 __intel_fbc_disable(dev_priv
);
1080 intel_fbc_get_reg_params(crtc
, &fbc
->params
);
1082 if (!intel_fbc_can_activate(crtc
))
1085 if (!fbc
->busy_bits
)
1086 intel_fbc_hw_activate(dev_priv
);
1088 intel_fbc_deactivate(dev_priv
, "frontbuffer write");
1091 void intel_fbc_post_update(struct intel_atomic_state
*state
,
1092 struct intel_crtc
*crtc
)
1094 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
1095 struct intel_plane
*plane
= to_intel_plane(crtc
->base
.primary
);
1096 const struct intel_plane_state
*plane_state
=
1097 intel_atomic_get_new_plane_state(state
, plane
);
1098 struct intel_fbc
*fbc
= &dev_priv
->fbc
;
1100 if (!plane
->has_fbc
|| !plane_state
)
1103 mutex_lock(&fbc
->lock
);
1104 __intel_fbc_post_update(crtc
);
1105 mutex_unlock(&fbc
->lock
);
1108 static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc
*fbc
)
1111 return to_intel_plane(fbc
->crtc
->base
.primary
)->frontbuffer_bit
;
1113 return fbc
->possible_framebuffer_bits
;
1116 void intel_fbc_invalidate(struct drm_i915_private
*dev_priv
,
1117 unsigned int frontbuffer_bits
,
1118 enum fb_op_origin origin
)
1120 struct intel_fbc
*fbc
= &dev_priv
->fbc
;
1122 if (!HAS_FBC(dev_priv
))
1125 if (origin
== ORIGIN_GTT
|| origin
== ORIGIN_FLIP
)
1128 mutex_lock(&fbc
->lock
);
1130 fbc
->busy_bits
|= intel_fbc_get_frontbuffer_bit(fbc
) & frontbuffer_bits
;
1132 if (fbc
->crtc
&& fbc
->busy_bits
)
1133 intel_fbc_deactivate(dev_priv
, "frontbuffer write");
1135 mutex_unlock(&fbc
->lock
);
/*
 * intel_fbc_flush - react to a completed frontbuffer write
 * @dev_priv: i915 device instance
 * @frontbuffer_bits: frontbuffer plane tracking bits that were flushed
 * @origin: which operation caused the flush
 *
 * Clears the corresponding busy bits and, once nothing FBC tracks is dirty
 * anymore, either recompresses (FBC still active) or re-runs the post-update
 * path to re-activate it.
 */
void intel_fbc_flush(struct drm_i915_private *dev_priv,
		     unsigned int frontbuffer_bits, enum fb_op_origin origin)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!HAS_FBC(dev_priv))
		return;

	/*
	 * GTT tracking does not nuke the entire cfb
	 * so don't clear busy_bits set for some other
	 * reason.
	 */
	if (origin == ORIGIN_GTT)
		return;

	mutex_lock(&fbc->lock);

	fbc->busy_bits &= ~frontbuffer_bits;

	/* NOTE(review): flips appear to be handled via the
	 * flip_pending/post-update path instead — confirm. */
	if (origin == ORIGIN_FLIP)
		goto out;

	/* Act only when all tracked bits are clean and the flush actually
	 * touched the frontbuffer FBC is bound to. */
	if (!fbc->busy_bits && fbc->crtc &&
	    (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
		if (fbc->active)
			intel_fbc_recompress(dev_priv);
		else if (!fbc->flip_pending)
			__intel_fbc_post_update(fbc->crtc);
	}

out:
	mutex_unlock(&fbc->lock);
}
/**
 * intel_fbc_choose_crtc - select a CRTC to enable FBC on
 * @dev_priv: i915 device instance
 * @state: the atomic state structure
 *
 * This function looks at the proposed state for CRTCs and planes, then chooses
 * which pipe is going to have FBC by setting intel_crtc_state->enable_fbc to
 * true.
 *
 * Later, intel_fbc_enable is going to look for state->enable_fbc and then maybe
 * enable FBC for the chosen CRTC. If it does, it will set dev_priv->fbc.crtc.
 */
void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
			   struct intel_atomic_state *state)
{
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_plane *plane;
	struct intel_plane_state *plane_state;
	bool crtc_chosen = false;
	int i;

	mutex_lock(&fbc->lock);

	/* Does this atomic commit involve the CRTC currently tied to FBC? */
	if (fbc->crtc &&
	    !intel_atomic_get_new_crtc_state(state, fbc->crtc))
		goto out;

	if (!intel_fbc_can_enable(dev_priv))
		goto out;

	/* Simply choose the first CRTC that is compatible and has a visible
	 * plane. We could go for fancier schemes such as checking the plane
	 * size, but this would just affect the few platforms that don't tie FBC
	 * to pipe or plane A. */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc);

		if (!plane->has_fbc)
			continue;

		if (!plane_state->uapi.visible)
			continue;

		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

		crtc_state->enable_fbc = true;
		crtc_chosen = true;
		break;
	}

	if (!crtc_chosen)
		fbc->no_fbc_reason = "no suitable CRTC for FBC";

out:
	mutex_unlock(&fbc->lock);
}
1233 * intel_fbc_enable: tries to enable FBC on the CRTC
1235 * @state: corresponding &drm_crtc_state for @crtc
1237 * This function checks if the given CRTC was chosen for FBC, then enables it if
1238 * possible. Notice that it doesn't activate FBC. It is valid to call
1239 * intel_fbc_enable multiple times for the same pipe without an
1240 * intel_fbc_disable in the middle, as long as it is deactivated.
1242 void intel_fbc_enable(struct intel_atomic_state
*state
,
1243 struct intel_crtc
*crtc
)
1245 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
1246 struct intel_plane
*plane
= to_intel_plane(crtc
->base
.primary
);
1247 const struct intel_crtc_state
*crtc_state
=
1248 intel_atomic_get_new_crtc_state(state
, crtc
);
1249 const struct intel_plane_state
*plane_state
=
1250 intel_atomic_get_new_plane_state(state
, plane
);
1251 struct intel_fbc
*fbc
= &dev_priv
->fbc
;
1252 struct intel_fbc_state_cache
*cache
= &fbc
->state_cache
;
1254 if (!plane
->has_fbc
|| !plane_state
)
1257 mutex_lock(&fbc
->lock
);
1260 if (fbc
->crtc
!= crtc
||
1261 (!intel_fbc_cfb_size_changed(dev_priv
) &&
1262 !intel_fbc_gen9_wa_cfb_stride_changed(dev_priv
)))
1265 __intel_fbc_disable(dev_priv
);
1268 drm_WARN_ON(&dev_priv
->drm
, fbc
->active
);
1270 intel_fbc_update_state_cache(crtc
, crtc_state
, plane_state
);
1272 /* FIXME crtc_state->enable_fbc lies :( */
1273 if (!cache
->plane
.visible
)
1276 if (intel_fbc_alloc_cfb(dev_priv
,
1277 intel_fbc_calculate_cfb_size(dev_priv
, cache
),
1278 plane_state
->hw
.fb
->format
->cpp
[0])) {
1279 cache
->plane
.visible
= false;
1280 fbc
->no_fbc_reason
= "not enough stolen memory";
1284 cache
->gen9_wa_cfb_stride
= intel_fbc_gen9_wa_cfb_stride(dev_priv
);
1286 drm_dbg_kms(&dev_priv
->drm
, "Enabling FBC on pipe %c\n",
1287 pipe_name(crtc
->pipe
));
1288 fbc
->no_fbc_reason
= "FBC enabled but not active yet\n";
1292 mutex_unlock(&fbc
->lock
);
1296 * intel_fbc_disable - disable FBC if it's associated with crtc
1299 * This function disables FBC if it's associated with the provided CRTC.
1301 void intel_fbc_disable(struct intel_crtc
*crtc
)
1303 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
1304 struct intel_plane
*plane
= to_intel_plane(crtc
->base
.primary
);
1305 struct intel_fbc
*fbc
= &dev_priv
->fbc
;
1307 if (!plane
->has_fbc
)
1310 mutex_lock(&fbc
->lock
);
1311 if (fbc
->crtc
== crtc
)
1312 __intel_fbc_disable(dev_priv
);
1313 mutex_unlock(&fbc
->lock
);
/**
 * intel_fbc_global_disable - globally disable FBC
 * @dev_priv: i915 device instance
 *
 * This function disables FBC regardless of which CRTC is associated with it.
 */
void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!HAS_FBC(dev_priv))
		return;

	mutex_lock(&fbc->lock);
	if (fbc->crtc) {
		/* The bound CRTC is not expected to still be active here. */
		drm_WARN_ON(&dev_priv->drm, fbc->crtc->active);
		__intel_fbc_disable(dev_priv);
	}
	mutex_unlock(&fbc->lock);
}
/*
 * Deferred handler for FIFO underruns: latches fbc->underrun_detected and
 * deactivates FBC. The flag stays set until intel_fbc_reset_underrun()
 * clears it.
 */
static void intel_fbc_underrun_work_fn(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, fbc.underrun_work);
	struct intel_fbc *fbc = &dev_priv->fbc;

	mutex_lock(&fbc->lock);

	/* Maybe we were scheduled twice. */
	if (fbc->underrun_detected || !fbc->crtc)
		goto out;

	drm_dbg_kms(&dev_priv->drm, "Disabling FBC due to FIFO underrun.\n");
	fbc->underrun_detected = true;

	intel_fbc_deactivate(dev_priv, "FIFO underrun");
out:
	mutex_unlock(&fbc->lock);
}
/**
 * intel_fbc_reset_underrun - reset FBC fifo underrun status.
 * @dev_priv: i915 device instance
 *
 * See intel_fbc_handle_fifo_underrun_irq(). For automated testing we
 * want to re-enable FBC after an underrun to increase test coverage.
 *
 * Returns 0 on success, or a negative error code if taking the FBC lock
 * was interrupted.
 */
int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv)
{
	int ret;

	/* Make sure a pending underrun work item can't race with us. */
	cancel_work_sync(&dev_priv->fbc.underrun_work);

	ret = mutex_lock_interruptible(&dev_priv->fbc.lock);
	if (ret)
		return ret;

	if (dev_priv->fbc.underrun_detected) {
		drm_dbg_kms(&dev_priv->drm,
			    "Re-allowing FBC after fifo underrun\n");
		dev_priv->fbc.no_fbc_reason = "FIFO underrun cleared";
	}

	dev_priv->fbc.underrun_detected = false;
	mutex_unlock(&dev_priv->fbc.lock);

	return 0;
}
1387 * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun
1388 * @dev_priv: i915 device instance
1390 * Without FBC, most underruns are harmless and don't really cause too many
1391 * problems, except for an annoying message on dmesg. With FBC, underruns can
1392 * become black screens or even worse, especially when paired with bad
1393 * watermarks. So in order for us to be on the safe side, completely disable FBC
1394 * in case we ever detect a FIFO underrun on any pipe. An underrun on any pipe
1395 * already suggests that watermarks may be bad, so try to be as safe as
1398 * This function is called from the IRQ handler.
1400 void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private
*dev_priv
)
1402 struct intel_fbc
*fbc
= &dev_priv
->fbc
;
1404 if (!HAS_FBC(dev_priv
))
1407 /* There's no guarantee that underrun_detected won't be set to true
1408 * right after this check and before the work is scheduled, but that's
1409 * not a problem since we'll check it again under the work function
1410 * while FBC is locked. This check here is just to prevent us from
1411 * unnecessarily scheduling the work, and it relies on the fact that we
1412 * never switch underrun_detect back to false after it's true. */
1413 if (READ_ONCE(fbc
->underrun_detected
))
1416 schedule_work(&fbc
->underrun_work
);
1420 * The DDX driver changes its behavior depending on the value it reads from
1421 * i915.enable_fbc, so sanitize it by translating the default value into either
1422 * 0 or 1 in order to allow it to know what's going on.
1424 * Notice that this is done at driver initialization and we still allow user
1425 * space to change the value during runtime without sanitizing it again. IGT
1426 * relies on being able to change i915.enable_fbc at runtime.
1428 static int intel_sanitize_fbc_option(struct drm_i915_private
*dev_priv
)
1430 if (dev_priv
->params
.enable_fbc
>= 0)
1431 return !!dev_priv
->params
.enable_fbc
;
1433 if (!HAS_FBC(dev_priv
))
1437 * Fbc is causing random underruns in CI execution on TGL platforms.
1438 * Disabling the same while the problem is being debugged and analyzed.
1440 if (IS_TIGERLAKE(dev_priv
))
1443 if (IS_BROADWELL(dev_priv
) || INTEL_GEN(dev_priv
) >= 9)
1449 static bool need_fbc_vtd_wa(struct drm_i915_private
*dev_priv
)
1451 /* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
1452 if (intel_vtd_active() &&
1453 (IS_SKYLAKE(dev_priv
) || IS_BROXTON(dev_priv
))) {
1454 drm_info(&dev_priv
->drm
,
1455 "Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
/**
 * intel_fbc_init - Initialize FBC
 * @dev_priv: the i915 device
 *
 * This function might be called during PM init process.
 */
void intel_fbc_init(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
	mutex_init(&fbc->lock);
	fbc->active = false;

	/* FBC needs stolen memory for the CFB (see the DOC comment at the
	 * top of this file); without it, report the feature as absent. */
	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		mkwrite_device_info(dev_priv)->display.has_fbc = false;

	if (need_fbc_vtd_wa(dev_priv))
		mkwrite_device_info(dev_priv)->display.has_fbc = false;

	dev_priv->params.enable_fbc = intel_sanitize_fbc_option(dev_priv);
	drm_dbg_kms(&dev_priv->drm, "Sanitized enable_fbc value: %d\n",
		    dev_priv->params.enable_fbc);

	if (!HAS_FBC(dev_priv)) {
		fbc->no_fbc_reason = "unsupported by this chipset";
		return;
	}

	/* We still don't have any sort of hardware state readout for FBC, so
	 * deactivate it in case the BIOS activated it to make sure software
	 * matches the hardware state. */
	if (intel_fbc_hw_is_active(dev_priv))
		intel_fbc_hw_deactivate(dev_priv);
}
);