/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * DOC: Frame Buffer Compression (FBC)
 *
 * FBC tries to save memory bandwidth (and so power consumption) by
 * compressing the amount of memory used by the display. It is totally
 * transparent to user space and completely handled in the kernel.
 *
 * The benefits of FBC are mostly visible with solid backgrounds and
 * variation-less patterns; they come from keeping the memory footprint small
 * and having fewer memory pages opened and accessed for refreshing the display.
 *
 * i915 is responsible for reserving stolen memory for FBC and programming its
 * offset into the proper registers. The hardware takes care of all the
 * compression/decompression. However, there are many known cases where we
 * have to forcibly disable it to allow proper screen updates.
 */

#include <drm/drm_fourcc.h>

#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"

static inline bool fbc_supported(struct drm_i915_private *dev_priv)
{
	return HAS_FBC(dev_priv);
}

/*
 * On some platforms the CRTC's x:0/y:0 coordinates don't match the
 * frontbuffer's x:0/y:0 coordinates, so we lie to the hardware about the
 * plane's origin in order for the x and y offsets to actually fit the
 * registers. As a consequence, the fence doesn't really start exactly at the
 * display plane address we program because it starts at the real start of the
 * buffer, so we have to take this into consideration here.
 */
static unsigned int get_crtc_fence_y_offset(struct intel_fbc *fbc)
{
	return fbc->state_cache.plane.y - fbc->state_cache.plane.adjusted_y;
}

/*
 * For SKL+, the plane source size used by the hardware is based on the value
 * we write to the PLANE_SIZE register. For BDW-, the hardware looks at the
 * value we wrote to PIPESRC.
 */
static void intel_fbc_get_plane_source_size(const struct intel_fbc_state_cache *cache,
					    int *width, int *height)
{
	if (width)
		*width = cache->plane.src_w;
	if (height)
		*height = cache->plane.src_h;
}

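/*
 * The compressed framebuffer (CFB) is sized as the number of lines the
 * hardware tracks (clamped per platform below) times the full framebuffer
 * stride. As a rough example, a 1920x1080 XRGB plane with a 7680 byte stride
 * needs 1080 * 7680 bytes (about 8 MiB) of stolen memory before any
 * compression-limit reduction is applied.
 */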
static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
					const struct intel_fbc_state_cache *cache)
{
	int lines;

	intel_fbc_get_plane_source_size(cache, NULL, &lines);
	if (IS_GEN(dev_priv, 7))
		lines = min(lines, 2048);
	else if (INTEL_GEN(dev_priv) >= 8)
		lines = min(lines, 2560);

	/* Hardware needs the full buffer stride, not just the active area. */
	return lines * cache->fb.stride;
}

static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
{
	u32 fbc_ctl;

	/* Disable compression */
	fbc_ctl = I915_READ(FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (intel_de_wait_for_clear(dev_priv, FBC_STATUS,
				    FBC_STAT_COMPRESSING, 10)) {
		DRM_DEBUG_KMS("FBC idle timed out\n");
		return;
	}
}

static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	int cfb_pitch;
	int i;
	u32 fbc_ctl;

	/* Note: fbc.threshold == 1 for i8xx */
	cfb_pitch = params->cfb_size / FBC_LL_SIZE;
	if (params->fb.stride < cfb_pitch)
		cfb_pitch = params->fb.stride;

	/* FBC_CTL wants 32B or 64B units */
	if (IS_GEN(dev_priv, 2))
		cfb_pitch = (cfb_pitch / 32) - 1;
	else
		cfb_pitch = (cfb_pitch / 64) - 1;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG(i), 0);

	if (IS_GEN(dev_priv, 4)) {
		u32 fbc_ctl2;

		/* Set it up... */
		fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM;
		fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.i9xx_plane);
		if (params->fence_id >= 0)
			fbc_ctl2 |= FBC_CTL_CPU_FENCE;
		I915_WRITE(FBC_CONTROL2, fbc_ctl2);
		I915_WRITE(FBC_FENCE_OFF, params->crtc.fence_y_offset);
	}

	/* enable it... */
	fbc_ctl = I915_READ(FBC_CONTROL);
	fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev_priv))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	if (params->fence_id >= 0)
		fbc_ctl |= params->fence_id;
	I915_WRITE(FBC_CONTROL, fbc_ctl);
}

static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv)
{
	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}

static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	u32 dpfc_ctl;

	dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane) | DPFC_SR_EN;
	if (params->fb.format->cpp[0] == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;

	if (params->fence_id >= 0) {
		dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fence_id;
		I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
	} else {
		I915_WRITE(DPFC_FENCE_YOFF, 0);
	}

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
}

static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv)
{
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(DPFC_CONTROL, dpfc_ctl);
	}
}

static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv)
{
	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}

/* This function forces a CFB recompression through the nuke operation. */
static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
{
	I915_WRITE(MSG_FBC_REND_STATE, FBC_REND_NUKE);
	POSTING_READ(MSG_FBC_REND_STATE);
}

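/*
 * ILK+ activation mirrors the CFB sizing done in find_compression_threshold():
 * the DPFC_CTL_LIMIT_* field is derived from fbc.threshold, bumped one step
 * for 16bpp framebuffers.
 */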
static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	u32 dpfc_ctl;
	int threshold = dev_priv->fbc.threshold;

	dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane);
	if (params->fb.format->cpp[0] == 2)
		threshold++;

	switch (threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}

	if (params->fence_id >= 0) {
		dpfc_ctl |= DPFC_CTL_FENCE_EN;
		if (IS_GEN(dev_priv, 5))
			dpfc_ctl |= params->fence_id;
		if (IS_GEN(dev_priv, 6)) {
			I915_WRITE(SNB_DPFC_CTL_SA,
				   SNB_CPU_FENCE_ENABLE |
				   params->fence_id);
			I915_WRITE(DPFC_CPU_FENCE_OFFSET,
				   params->crtc.fence_y_offset);
		}
	} else {
		if (IS_GEN(dev_priv, 6)) {
			I915_WRITE(SNB_DPFC_CTL_SA, 0);
			I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
		}
	}

	I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);

	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	intel_fbc_recompress(dev_priv);
}

static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv)
{
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
	}
}

static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv)
{
	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}

static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	u32 dpfc_ctl;
	int threshold = dev_priv->fbc.threshold;

	/* Display WA #0529: skl, kbl, bxt. */
	if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) {
		u32 val = I915_READ(CHICKEN_MISC_4);

		val &= ~(FBC_STRIDE_OVERRIDE | FBC_STRIDE_MASK);

		if (params->gen9_wa_cfb_stride)
			val |= FBC_STRIDE_OVERRIDE | params->gen9_wa_cfb_stride;

		I915_WRITE(CHICKEN_MISC_4, val);
	}

	dpfc_ctl = 0;
	if (IS_IVYBRIDGE(dev_priv))
		dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.i9xx_plane);

	if (params->fb.format->cpp[0] == 2)
		threshold++;

	switch (threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}

	if (params->fence_id >= 0) {
		dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE |
			   params->fence_id);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
	} else {
		I915_WRITE(SNB_DPFC_CTL_SA, 0);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
	}

	if (dev_priv->fbc.false_color)
		dpfc_ctl |= FBC_CTL_FALSE_COLOR;

	if (IS_IVYBRIDGE(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
		I915_WRITE(CHICKEN_PIPESL_1(params->crtc.pipe),
			   I915_READ(CHICKEN_PIPESL_1(params->crtc.pipe)) |
			   HSW_FBCQ_DIS);
	}

	if (INTEL_GEN(dev_priv) >= 11)
		/* Wa_1409120013:icl,ehl,tgl */
		I915_WRITE(ILK_DPFC_CHICKEN, ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);

	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	intel_fbc_recompress(dev_priv);
}

static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 5)
		return ilk_fbc_is_active(dev_priv);
	else if (IS_GM45(dev_priv))
		return g4x_fbc_is_active(dev_priv);
	else
		return i8xx_fbc_is_active(dev_priv);
}

static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	fbc->active = true;
	fbc->activated = true;

	if (INTEL_GEN(dev_priv) >= 7)
		gen7_fbc_activate(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 5)
		ilk_fbc_activate(dev_priv);
	else if (IS_GM45(dev_priv))
		g4x_fbc_activate(dev_priv);
	else
		i8xx_fbc_activate(dev_priv);
}

static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	fbc->active = false;

	if (INTEL_GEN(dev_priv) >= 5)
		ilk_fbc_deactivate(dev_priv);
	else if (IS_GM45(dev_priv))
		g4x_fbc_deactivate(dev_priv);
	else
		i8xx_fbc_deactivate(dev_priv);
}

/**
 * intel_fbc_is_active - Is FBC active?
 * @dev_priv: i915 device instance
 *
 * This function is used to verify the current state of FBC.
 *
 * FIXME: This should be tracked in the plane config eventually
 * instead of queried at runtime for most callers.
 */
bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
{
	return dev_priv->fbc.active;
}

static void intel_fbc_deactivate(struct drm_i915_private *dev_priv,
				 const char *reason)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	WARN_ON(!mutex_is_locked(&fbc->lock));

	if (fbc->active)
		intel_fbc_hw_deactivate(dev_priv);

	fbc->no_fbc_reason = reason;
}

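/*
 * fbc.threshold records how much the CFB had to be shrunk relative to the
 * ideal size computed by intel_fbc_calculate_cfb_size(): 1 means a full-size
 * CFB, 2 and 4 mean only half or a quarter of that could be reserved, which
 * the activate functions communicate to the hardware through the
 * DPFC_CTL_LIMIT_* fields.
 */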
static int find_compression_threshold(struct drm_i915_private *dev_priv,
				      struct drm_mm_node *node,
				      unsigned int size,
				      unsigned int fb_cpp)
{
	int compression_threshold = 1;
	int ret;
	u64 end;

	/* The FBC hardware for BDW/SKL doesn't have access to the stolen
	 * reserved range size, so it always assumes the maximum (8mb) is used.
	 * If we enable FBC using a CFB on that memory range we'll get FIFO
	 * underruns, even if that range is not reserved by the BIOS. */
	if (IS_BROADWELL(dev_priv) || IS_GEN9_BC(dev_priv))
		end = resource_size(&dev_priv->dsm) - 8 * 1024 * 1024;
	else
		end = U64_MAX;

	/* HACK: This code depends on what we will do in *_enable_fbc. If that
	 * code changes, this code needs to change as well.
	 *
	 * The enable_fbc code will attempt to use one of our 2 compression
	 * thresholds, therefore, in that case, we only have 1 resort.
	 */

	/* Try to over-allocate to reduce reallocations and fragmentation. */
	ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size <<= 1,
						   4096, 0, end);
	if (ret == 0)
		return compression_threshold;

again:
	/* HW's ability to limit the CFB is 1:4 */
	if (compression_threshold > 4 ||
	    (fb_cpp == 2 && compression_threshold == 2))
		return 0;

	ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1,
						   4096, 0, end);
	if (ret && INTEL_GEN(dev_priv) <= 4) {
		return 0;
	} else if (ret) {
		compression_threshold <<= 1;
		goto again;
	} else {
		return compression_threshold;
	}
}

static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
			       unsigned int size, unsigned int fb_cpp)
{
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct drm_mm_node *uninitialized_var(compressed_llb);
	int ret;

	WARN_ON(drm_mm_node_allocated(&fbc->compressed_fb));

	ret = find_compression_threshold(dev_priv, &fbc->compressed_fb,
					 size, fb_cpp);
	if (!ret)
		goto err_llb;
	else if (ret > 1) {
		DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
	}

	fbc->threshold = ret;

	if (INTEL_GEN(dev_priv) >= 5)
		I915_WRITE(ILK_DPFC_CB_BASE, fbc->compressed_fb.start);
	else if (IS_GM45(dev_priv)) {
		I915_WRITE(DPFC_CB_BASE, fbc->compressed_fb.start);
	} else {
		compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
		if (!compressed_llb)
			goto err_fb;

		ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb,
						  4096, 4096);
		if (ret)
			goto err_fb;

		fbc->compressed_llb = compressed_llb;

		GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start,
					     fbc->compressed_fb.start,
					     U32_MAX));
		GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start,
					     fbc->compressed_llb->start,
					     U32_MAX));
		I915_WRITE(FBC_CFB_BASE,
			   dev_priv->dsm.start + fbc->compressed_fb.start);
		I915_WRITE(FBC_LL_BASE,
			   dev_priv->dsm.start + compressed_llb->start);
	}

	DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
		      fbc->compressed_fb.size, fbc->threshold);

	return 0;

err_fb:
	kfree(compressed_llb);
	i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
err_llb:
	if (drm_mm_initialized(&dev_priv->mm.stolen))
		pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
	return -ENOSPC;
}

static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (drm_mm_node_allocated(&fbc->compressed_fb))
		i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);

	if (fbc->compressed_llb) {
		i915_gem_stolen_remove_node(dev_priv, fbc->compressed_llb);
		kfree(fbc->compressed_llb);
	}
}

void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);
	__intel_fbc_cleanup_cfb(dev_priv);
	mutex_unlock(&fbc->lock);
}

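/*
 * The two helpers below encode the extra stride and pixel format restrictions
 * that FBC imposes on top of the normal plane checks; both are consulted from
 * intel_fbc_can_activate().
 */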
static bool stride_is_valid(struct drm_i915_private *dev_priv,
			    unsigned int stride)
{
	/* This should have been caught earlier. */
	if (WARN_ON_ONCE((stride & (64 - 1)) != 0))
		return false;

	/* Below are the additional FBC restrictions. */
	if (stride < 512)
		return false;

	if (IS_GEN(dev_priv, 2) || IS_GEN(dev_priv, 3))
		return stride == 4096 || stride == 8192;

	if (IS_GEN(dev_priv, 4) && !IS_G4X(dev_priv) && stride < 2048)
		return false;

	if (stride > 16384)
		return false;

	return true;
}

static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
				  u32 pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_XBGR8888:
		return true;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_RGB565:
		/* 16bpp not supported on gen2 */
		if (IS_GEN(dev_priv, 2))
			return false;
		/* WaFbcOnly1to1Ratio:ctg */
		if (IS_G4X(dev_priv))
			return false;
		return true;
	default:
		return false;
	}
}

/*
 * For some reason, the hardware tracking starts looking at whatever we
 * programmed as the display plane base address register. It does not look at
 * the X and Y offset registers. That's why we look at the crtc->adjusted{x,y}
 * variables instead of just looking at the pipe/plane size.
 */
static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	unsigned int effective_w, effective_h, max_w, max_h;

	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		max_w = 5120;
		max_h = 4096;
	} else if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) {
		max_w = 4096;
		max_h = 4096;
	} else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		max_w = 4096;
		max_h = 2048;
	} else {
		max_w = 2048;
		max_h = 1536;
	}

	intel_fbc_get_plane_source_size(&fbc->state_cache, &effective_w,
					&effective_h);
	effective_w += fbc->state_cache.plane.adjusted_x;
	effective_h += fbc->state_cache.plane.adjusted_y;

	return effective_w <= max_w && effective_h <= max_h;
}

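/*
 * Snapshot the plane and framebuffer state FBC cares about. This is called
 * with fbc->lock held from intel_fbc_enable() and intel_fbc_pre_update(), so
 * the cache describes the state being committed rather than what is currently
 * on screen.
 */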
static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
					 const struct intel_crtc_state *crtc_state,
					 const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;
	struct drm_framebuffer *fb = plane_state->hw.fb;

	cache->plane.visible = plane_state->uapi.visible;
	if (!cache->plane.visible)
		return;

	cache->crtc.mode_flags = crtc_state->hw.adjusted_mode.flags;
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate;

	cache->plane.rotation = plane_state->hw.rotation;
	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	cache->plane.src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
	cache->plane.src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
	cache->plane.adjusted_x = plane_state->color_plane[0].x;
	cache->plane.adjusted_y = plane_state->color_plane[0].y;
	cache->plane.y = plane_state->uapi.src.y1 >> 16;

	cache->plane.pixel_blend_mode = plane_state->hw.pixel_blend_mode;

	cache->fb.format = fb->format;
	cache->fb.stride = fb->pitches[0];

	WARN_ON(plane_state->flags & PLANE_HAS_FENCE &&
		!plane_state->vma->fence);

	if (plane_state->flags & PLANE_HAS_FENCE &&
	    plane_state->vma->fence)
		cache->fence_id = plane_state->vma->fence->id;
	else
		cache->fence_id = -1;
}

static bool intel_fbc_cfb_size_changed(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	return intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) >
		fbc->compressed_fb.size * fbc->threshold;
}

static bool intel_fbc_can_activate(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;

	if (!cache->plane.visible) {
		fbc->no_fbc_reason = "primary plane not visible";
		return false;
	}

	/* We don't need to use a state cache here since this information is
	 * global for all CRTC.
	 */
	if (fbc->underrun_detected) {
		fbc->no_fbc_reason = "underrun detected";
		return false;
	}

	if (cache->crtc.mode_flags & DRM_MODE_FLAG_INTERLACE) {
		fbc->no_fbc_reason = "incompatible mode";
		return false;
	}

	if (!intel_fbc_hw_tracking_covers_screen(crtc)) {
		fbc->no_fbc_reason = "mode too large for compression";
		return false;
	}

	/* The use of a CPU fence is mandatory in order to detect writes
	 * by the CPU to the scanout and trigger updates to the FBC.
	 *
	 * Note that it is possible for a tiled surface to be unmappable (and
	 * so have no fence associated with it) due to aperture constraints
	 * at the time of pinning.
	 *
	 * FIXME with 90/270 degree rotation we should use the fence on
	 * the normal GTT view (the rotated view doesn't even have a
	 * fence). Would need changes to the FBC fence Y offset as well.
	 * For now this will effectively disable FBC with 90/270 degree
	 * rotation.
	 */
	if (cache->fence_id < 0) {
		fbc->no_fbc_reason = "framebuffer not tiled or fenced";
		return false;
	}
	if (INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv) &&
	    cache->plane.rotation != DRM_MODE_ROTATE_0) {
		fbc->no_fbc_reason = "rotation unsupported";
		return false;
	}

	if (!stride_is_valid(dev_priv, cache->fb.stride)) {
		fbc->no_fbc_reason = "framebuffer stride not supported";
		return false;
	}

	if (!pixel_format_is_valid(dev_priv, cache->fb.format->format)) {
		fbc->no_fbc_reason = "pixel format is invalid";
		return false;
	}

	if (cache->plane.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE &&
	    cache->fb.format->has_alpha) {
		fbc->no_fbc_reason = "per-pixel alpha blending is incompatible with FBC";
		return false;
	}

	/* WaFbcExceedCdClockThreshold:hsw,bdw */
	if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
	    cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk.hw.cdclk * 95 / 100) {
		fbc->no_fbc_reason = "pixel rate is too big";
		return false;
	}

	/* It is possible for the required CFB size to change without a
	 * crtc->disable + crtc->enable since it is possible to change the
	 * stride without triggering a full modeset. Since we try to
	 * over-allocate the CFB, there's a chance we may keep FBC enabled even
	 * if this happens, but if we exceed the current CFB size we'll have to
	 * disable FBC. Notice that it would be possible to disable FBC, wait
	 * for a frame, free the stolen node, then try to reenable FBC in case
	 * we didn't get any invalidate/deactivate calls, but this would require
	 * a lot of tracking just for a specific case. If we conclude it's an
	 * important case, we can implement it later. */
	if (intel_fbc_cfb_size_changed(dev_priv)) {
		fbc->no_fbc_reason = "CFB requirements changed";
		return false;
	}

	/*
	 * Work around a problem on GEN9+ HW, where enabling FBC on a plane
	 * having a Y offset that isn't divisible by 4 causes FIFO underrun
	 * and screen flicker.
	 */
	if (INTEL_GEN(dev_priv) >= 9 &&
	    (fbc->state_cache.plane.adjusted_y & 3)) {
		fbc->no_fbc_reason = "plane Y offset is misaligned";
		return false;
	}

	return true;
}

static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (intel_vgpu_active(dev_priv)) {
		fbc->no_fbc_reason = "VGPU is active";
		return false;
	}

	if (!i915_modparams.enable_fbc) {
		fbc->no_fbc_reason = "disabled per module param or by default";
		return false;
	}

	if (fbc->underrun_detected) {
		fbc->no_fbc_reason = "underrun detected";
		return false;
	}

	return true;
}

static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
				     struct intel_fbc_reg_params *params)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;

	/* Since all our fields are integer types, use memset here so the
	 * comparison function can rely on memcmp because the padding will be
	 * zeroed. */
	memset(params, 0, sizeof(*params));

	params->fence_id = cache->fence_id;

	params->crtc.pipe = crtc->pipe;
	params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane;
	params->crtc.fence_y_offset = get_crtc_fence_y_offset(fbc);

	params->fb.format = cache->fb.format;
	params->fb.stride = cache->fb.stride;

	params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);

	params->gen9_wa_cfb_stride = cache->gen9_wa_cfb_stride;

	params->plane_visible = cache->plane.visible;
}

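/*
 * A flip "nuke" lets FBC stay enabled across a page flip and merely forces a
 * recompression afterwards, instead of deactivating FBC in
 * intel_fbc_pre_update() and reactivating it after the update. That is only
 * safe when the new state matches the parameters already programmed into the
 * hardware, which is what the checks below verify.
 */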
static bool intel_fbc_can_flip_nuke(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_fbc *fbc = &dev_priv->fbc;
	const struct intel_fbc_state_cache *cache = &fbc->state_cache;
	const struct intel_fbc_reg_params *params = &fbc->params;

	if (drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
		return false;

	if (!params->plane_visible)
		return false;

	if (!intel_fbc_can_activate(crtc))
		return false;

	if (params->fb.format != cache->fb.format)
		return false;

	if (params->fb.stride != cache->fb.stride)
		return false;

	if (params->cfb_size != intel_fbc_calculate_cfb_size(dev_priv, cache))
		return false;

	if (params->gen9_wa_cfb_stride != cache->gen9_wa_cfb_stride)
		return false;

	return true;
}

bool intel_fbc_pre_update(struct intel_crtc *crtc,
			  const struct intel_crtc_state *crtc_state,
			  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	const char *reason = "update pending";
	bool need_vblank_wait = false;

	if (!fbc_supported(dev_priv))
		return need_vblank_wait;

	mutex_lock(&fbc->lock);

	if (fbc->crtc != crtc)
		goto unlock;

	intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
	fbc->flip_pending = true;

	if (!intel_fbc_can_flip_nuke(crtc_state)) {
		intel_fbc_deactivate(dev_priv, reason);

		/*
		 * Display WA #1198: glk+
		 * Need an extra vblank wait between FBC disable and most plane
		 * updates. Bspec says this is only needed for plane disable, but
		 * that is not true. Touching most plane registers will cause the
		 * corruption to appear. Also SKL/derivatives do not seem to be
		 * affected.
		 *
		 * TODO: could optimize this a bit by sampling the frame
		 * counter when we disable FBC (if it was already done earlier)
		 * and skipping the extra vblank wait before the plane update
		 * if at least one frame has already passed.
		 */
		if (fbc->activated &&
		    (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)))
			need_vblank_wait = true;
		fbc->activated = false;
	}
unlock:
	mutex_unlock(&fbc->lock);

	return need_vblank_wait;
}

/**
 * __intel_fbc_disable - disable FBC
 * @dev_priv: i915 device instance
 *
 * This is the low level function that actually disables FBC. Callers should
 * grab the FBC lock.
 */
static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_crtc *crtc = fbc->crtc;

	WARN_ON(!mutex_is_locked(&fbc->lock));
	WARN_ON(!fbc->crtc);
	WARN_ON(fbc->active);

	DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));

	__intel_fbc_cleanup_cfb(dev_priv);

	fbc->crtc = NULL;
}

static void __intel_fbc_post_update(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;

	WARN_ON(!mutex_is_locked(&fbc->lock));

	if (fbc->crtc != crtc)
		return;

	fbc->flip_pending = false;

	if (!i915_modparams.enable_fbc) {
		intel_fbc_deactivate(dev_priv, "disabled at runtime per module param");
		__intel_fbc_disable(dev_priv);

		return;
	}

	intel_fbc_get_reg_params(crtc, &fbc->params);

	if (!intel_fbc_can_activate(crtc))
		return;

	if (!fbc->busy_bits)
		intel_fbc_hw_activate(dev_priv);
	else
		intel_fbc_deactivate(dev_priv, "frontbuffer write");
}

void intel_fbc_post_update(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);
	__intel_fbc_post_update(crtc);
	mutex_unlock(&fbc->lock);
}

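/*
 * Returns the frontbuffer bit to monitor: the primary plane of the CRTC that
 * currently owns FBC, or every possible frontbuffer bit while FBC is not tied
 * to a CRTC yet.
 */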
static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc)
{
	if (fbc->crtc)
		return to_intel_plane(fbc->crtc->base.primary)->frontbuffer_bit;
	else
		return fbc->possible_framebuffer_bits;
}

void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
			  unsigned int frontbuffer_bits,
			  enum fb_op_origin origin)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
		return;

	mutex_lock(&fbc->lock);

	fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits;

	if (fbc->crtc && fbc->busy_bits)
		intel_fbc_deactivate(dev_priv, "frontbuffer write");

	mutex_unlock(&fbc->lock);
}

void intel_fbc_flush(struct drm_i915_private *dev_priv,
		     unsigned int frontbuffer_bits, enum fb_op_origin origin)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);

	fbc->busy_bits &= ~frontbuffer_bits;

	if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
		goto out;

	if (!fbc->busy_bits && fbc->crtc &&
	    (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
		if (fbc->active)
			intel_fbc_recompress(dev_priv);
		else if (!fbc->flip_pending)
			__intel_fbc_post_update(fbc->crtc);
	}

out:
	mutex_unlock(&fbc->lock);
}

/**
 * intel_fbc_choose_crtc - select a CRTC to enable FBC on
 * @dev_priv: i915 device instance
 * @state: the atomic state structure
 *
 * This function looks at the proposed state for CRTCs and planes, then chooses
 * which pipe is going to have FBC by setting intel_crtc_state->enable_fbc to
 * true.
 *
 * Later, intel_fbc_enable is going to look for state->enable_fbc and then maybe
 * enable FBC for the chosen CRTC. If it does, it will set dev_priv->fbc.crtc.
 */
void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
			   struct intel_atomic_state *state)
{
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_plane *plane;
	struct intel_plane_state *plane_state;
	bool crtc_chosen = false;
	int i;

	mutex_lock(&fbc->lock);

	/* Does this atomic commit involve the CRTC currently tied to FBC? */
	if (fbc->crtc &&
	    !intel_atomic_get_new_crtc_state(state, fbc->crtc))
		goto out;

	if (!intel_fbc_can_enable(dev_priv))
		goto out;

	/* Simply choose the first CRTC that is compatible and has a visible
	 * plane. We could go for fancier schemes such as checking the plane
	 * size, but this would just affect the few platforms that don't tie FBC
	 * to pipe or plane A. */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc);

		if (!plane->has_fbc)
			continue;

		if (!plane_state->uapi.visible)
			continue;

		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

		crtc_state->enable_fbc = true;
		crtc_chosen = true;
		break;
	}

	if (!crtc_chosen)
		fbc->no_fbc_reason = "no suitable CRTC for FBC";

out:
	mutex_unlock(&fbc->lock);
}

/**
 * intel_fbc_enable: tries to enable FBC on the CRTC
 * @crtc: the CRTC
 * @crtc_state: corresponding &drm_crtc_state for @crtc
 * @plane_state: corresponding &drm_plane_state for the primary plane of @crtc
 *
 * This function checks if the given CRTC was chosen for FBC, then enables it if
 * possible. Notice that it doesn't activate FBC. It is valid to call
 * intel_fbc_enable multiple times for the same pipe without an
 * intel_fbc_disable in the middle, as long as it is deactivated.
 */
void intel_fbc_enable(struct intel_crtc *crtc,
		      const struct intel_crtc_state *crtc_state,
		      const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;
	const struct drm_framebuffer *fb = plane_state->hw.fb;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);

	if (fbc->crtc) {
		if (fbc->crtc != crtc ||
		    !intel_fbc_cfb_size_changed(dev_priv))
			goto out;

		__intel_fbc_disable(dev_priv);
	}

	WARN_ON(fbc->active);

	intel_fbc_update_state_cache(crtc, crtc_state, plane_state);

	/* FIXME crtc_state->enable_fbc lies :( */
	if (!cache->plane.visible)
		goto out;

	if (intel_fbc_alloc_cfb(dev_priv,
				intel_fbc_calculate_cfb_size(dev_priv, cache),
				fb->format->cpp[0])) {
		cache->plane.visible = false;
		fbc->no_fbc_reason = "not enough stolen memory";
		goto out;
	}

	if ((IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) &&
	    fb->modifier != I915_FORMAT_MOD_X_TILED)
		cache->gen9_wa_cfb_stride =
			DIV_ROUND_UP(cache->plane.src_w, 32 * fbc->threshold) * 8;
	else
		cache->gen9_wa_cfb_stride = 0;

	DRM_DEBUG_KMS("Enabling FBC on pipe %c\n", pipe_name(crtc->pipe));
	fbc->no_fbc_reason = "FBC enabled but not active yet\n";

	fbc->crtc = crtc;
out:
	mutex_unlock(&fbc->lock);
}

/**
 * intel_fbc_disable - disable FBC if it's associated with crtc
 * @crtc: the CRTC
 *
 * This function disables FBC if it's associated with the provided CRTC.
 */
void intel_fbc_disable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);
	if (fbc->crtc == crtc)
		__intel_fbc_disable(dev_priv);
	mutex_unlock(&fbc->lock);
}

/**
 * intel_fbc_global_disable - globally disable FBC
 * @dev_priv: i915 device instance
 *
 * This function disables FBC regardless of which CRTC is associated with it.
 */
void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);
	if (fbc->crtc) {
		WARN_ON(fbc->crtc->active);
		__intel_fbc_disable(dev_priv);
	}
	mutex_unlock(&fbc->lock);
}

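/*
 * FIFO underruns are reported from the IRQ handler, where fbc->lock cannot be
 * taken, so the actual FBC deactivation is deferred to this worker.
 */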
static void intel_fbc_underrun_work_fn(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, fbc.underrun_work);
	struct intel_fbc *fbc = &dev_priv->fbc;

	mutex_lock(&fbc->lock);

	/* Maybe we were scheduled twice. */
	if (fbc->underrun_detected || !fbc->crtc)
		goto out;

	DRM_DEBUG_KMS("Disabling FBC due to FIFO underrun.\n");
	fbc->underrun_detected = true;

	intel_fbc_deactivate(dev_priv, "FIFO underrun");
out:
	mutex_unlock(&fbc->lock);
}

/**
 * intel_fbc_reset_underrun - reset FBC fifo underrun status.
 * @dev_priv: i915 device instance
 *
 * See intel_fbc_handle_fifo_underrun_irq(). For automated testing we
 * want to re-enable FBC after an underrun to increase test coverage.
 */
int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv)
{
	int ret;

	cancel_work_sync(&dev_priv->fbc.underrun_work);

	ret = mutex_lock_interruptible(&dev_priv->fbc.lock);
	if (ret)
		return ret;

	if (dev_priv->fbc.underrun_detected) {
		DRM_DEBUG_KMS("Re-allowing FBC after fifo underrun\n");
		dev_priv->fbc.no_fbc_reason = "FIFO underrun cleared";
	}

	dev_priv->fbc.underrun_detected = false;
	mutex_unlock(&dev_priv->fbc.lock);

	return 0;
}

/**
 * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun
 * @dev_priv: i915 device instance
 *
 * Without FBC, most underruns are harmless and don't really cause too many
 * problems, except for an annoying message on dmesg. With FBC, underruns can
 * become black screens or even worse, especially when paired with bad
 * watermarks. So in order for us to be on the safe side, completely disable FBC
 * in case we ever detect a FIFO underrun on any pipe. An underrun on any pipe
 * already suggests that watermarks may be bad, so try to be as safe as
 * possible.
 *
 * This function is called from the IRQ handler.
 */
void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	/* There's no guarantee that underrun_detected won't be set to true
	 * right after this check and before the work is scheduled, but that's
	 * not a problem since we'll check it again under the work function
	 * while FBC is locked. This check here is just to prevent us from
	 * unnecessarily scheduling the work, and it relies on the fact that we
	 * never switch underrun_detected back to false after it's true. */
	if (READ_ONCE(fbc->underrun_detected))
		return;

	schedule_work(&fbc->underrun_work);
}

/*
 * The DDX driver changes its behavior depending on the value it reads from
 * i915.enable_fbc, so sanitize it by translating the default value into either
 * 0 or 1 in order to allow it to know what's going on.
 *
 * Notice that this is done at driver initialization and we still allow user
 * space to change the value during runtime without sanitizing it again. IGT
 * relies on being able to change i915.enable_fbc at runtime.
 */
static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
{
	if (i915_modparams.enable_fbc >= 0)
		return !!i915_modparams.enable_fbc;

	if (!HAS_FBC(dev_priv))
		return 0;

	if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
		return 1;

	return 0;
}

static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv)
{
	/* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
	if (intel_vtd_active() &&
	    (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) {
		DRM_INFO("Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
		return true;
	}

	return false;
}

/**
 * intel_fbc_init - Initialize FBC
 * @dev_priv: the i915 device
 *
 * This function might be called during PM init process.
 */
void intel_fbc_init(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
	mutex_init(&fbc->lock);
	fbc->active = false;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		mkwrite_device_info(dev_priv)->display.has_fbc = false;

	if (need_fbc_vtd_wa(dev_priv))
		mkwrite_device_info(dev_priv)->display.has_fbc = false;

	i915_modparams.enable_fbc = intel_sanitize_fbc_option(dev_priv);
	DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n",
		      i915_modparams.enable_fbc);

	if (!HAS_FBC(dev_priv)) {
		fbc->no_fbc_reason = "unsupported by this chipset";
		return;
	}

	/* This value was pulled out of someone's hat */
	if (INTEL_GEN(dev_priv) <= 4 && !IS_GM45(dev_priv))
		I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);

	/* We still don't have any sort of hardware state readout for FBC, so
	 * deactivate it in case the BIOS activated it to make sure software
	 * matches the hardware state. */
	if (intel_fbc_hw_is_active(dev_priv))
		intel_fbc_hw_deactivate(dev_priv);
}