/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */
#include <linux/module.h>
#include <linux/pm_runtime.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>

#include "display/intel_atomic.h"
#include "display/intel_display_types.h"
#include "display/intel_fbc.h"
#include "display/intel_sprite.h"

#include "gt/intel_llc.h"

#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_trace.h"
#include "intel_pm.h"
#include "intel_sideband.h"
#include "../../../platform/x86/intel_ips.h"
static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
{
	if (HAS_LLC(dev_priv)) {
		/*
		 * WaCompressedResourceDisplayNewHashMode:skl,kbl
		 * Display WA #0390: skl,kbl
		 *
		 * Must match Sampler, Pixel Back End, and Media. See
		 * WaCompressedResourceSamplerPbeMediaNewHashMode.
		 */
		I915_WRITE(CHICKEN_PAR1_1,
			   I915_READ(CHICKEN_PAR1_1) |
			   SKL_DE_COMPRESSED_HASH_MODE);
	}

	/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl,cfl */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);

	/* WaEnableChickenDCPR:skl,bxt,kbl,glk,cfl */
	I915_WRITE(GEN8_CHICKEN_DCPR_1,
		   I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);

	/* WaFbcTurnOffFbcWatermark:skl,bxt,kbl,cfl */
	/* WaFbcWakeMemOn:skl,bxt,kbl,glk,cfl */
	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
		   DISP_FBC_WM_DIS |
		   DISP_FBC_MEMORY_WAKE);

	/* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl,cfl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_DISABLE_DUMMY0);

	if (IS_SKYLAKE(dev_priv)) {
		/* WaDisableDopClockGating */
		I915_WRITE(GEN7_MISCCPCTL, I915_READ(GEN7_MISCCPCTL)
			   & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	}
}
static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* WaDisableSDEUnitClockGating:bxt */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * FIXME:
	 * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
	 */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);

	/*
	 * Wa: Backlight PWM may stop in the asserted state, causing backlight
	 * to stay fully on.
	 */
	I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
		   PWM1_GATING_DIS | PWM2_GATING_DIS);

	/*
	 * Lower the display internal timeout.
	 * This is needed to avoid any hard hangs when DSI port PLL
	 * is off and a MMIO access is attempted by any privileged
	 * application, using batch buffers or any other means.
	 */
	I915_WRITE(RM_TIMEOUT, MMIO_TIMEOUT_US(950));
}
static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/*
	 * WaDisablePWMClockGating:glk
	 * Backlight PWM may stop in the asserted state, causing backlight
	 * to stay fully on.
	 */
	I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
		   PWM1_GATING_DIS | PWM2_GATING_DIS);

	/* WaDDIIOTimeout:glk */
	if (IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1)) {
		u32 val = I915_READ(CHICKEN_MISC_2);
		val &= ~(GLK_CL0_PWR_DOWN |
			 GLK_CL1_PWR_DOWN |
			 GLK_CL2_PWR_DOWN);
		I915_WRITE(CHICKEN_MISC_2, val);
	}
}
static void pnv_get_mem_freq(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
static void ilk_get_mem_freq(struct drm_i915_private *dev_priv)
{
	u16 ddrpll, csipll;

	ddrpll = intel_uncore_read16(&dev_priv->uncore, DDRMPLL1);
	csipll = intel_uncore_read16(&dev_priv->uncore, CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		drm_dbg(&dev_priv->drm, "unknown memory frequency 0x%02x\n",
			ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		drm_dbg(&dev_priv->drm, "unknown fsb frequency 0x%04x\n",
			csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}
}
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop,
							 bool is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}
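/*
 * For illustration (assuming the field order display_sr, cursor_sr,
 * display_hpll_disable, cursor_hpll_disable consumed by pnv_update_wm()
 * below): a desktop part on DDR2 with fsb == 800 and mem == 667 matches
 * the {1, 0, 800, 667, 3354, 33354, 3807, 33807} row above, i.e. a
 * display self-refresh latency of 3354 ns and a cursor self-refresh
 * latency of 33354 ns.
 */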
static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	vlv_punit_get(dev_priv);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
	if (enable)
		val &= ~FORCE_DDR_HIGH_FREQ;
	else
		val |= FORCE_DDR_HIGH_FREQ;
	val &= ~FORCE_DDR_LOW_FREQ;
	val |= FORCE_DDR_FREQ_REQ_ACK;
	vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
		      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
		drm_err(&dev_priv->drm,
			"timed out waiting for Punit DDR DVFS request\n");

	vlv_punit_put(dev_priv);
}
static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	vlv_punit_get(dev_priv);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
	if (enable)
		val |= DSP_MAXFIFO_PM5_ENABLE;
	else
		val &= ~DSP_MAXFIFO_PM5_ENABLE;
	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);

	vlv_punit_put(dev_priv);
}
#define FW_WM(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
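/*
 * FW_WM() merely shifts a watermark value into its register field and
 * masks off any overflow. For example, FW_WM(wm, SR) expands to
 * ((wm << DSPFW_SR_SHIFT) & DSPFW_SR_MASK), so an oversized wm is
 * truncated to the field width instead of corrupting neighbouring
 * fields.
 */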
static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	bool was_enabled;
	u32 val;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		was_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
		POSTING_READ(FW_BLC_SELF_VLV);
	} else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
		was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_PINEVIEW(dev_priv)) {
		val = I915_READ(DSPFW3);
		was_enabled = val & PINEVIEW_SELF_REFRESH_EN;
		if (enable)
			val |= PINEVIEW_SELF_REFRESH_EN;
		else
			val &= ~PINEVIEW_SELF_REFRESH_EN;
		I915_WRITE(DSPFW3, val);
		POSTING_READ(DSPFW3);
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
		was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
		I915_WRITE(FW_BLC_SELF, val);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_I915GM(dev_priv)) {
		/*
		 * FIXME can't find a bit like this for 915G, and
		 * yet it does have the related watermark in
		 * FW_BLC_SELF. What's going on?
		 */
		was_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
		I915_WRITE(INSTPM, val);
		POSTING_READ(INSTPM);
	} else {
		return false;
	}

	trace_intel_memory_cxsr(dev_priv, was_enabled, enable);

	drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n",
		    enableddisabled(enable),
		    enableddisabled(was_enabled));

	return was_enabled;
}
/**
 * intel_set_memory_cxsr - Configure CxSR state
 * @dev_priv: i915 device
 * @enable: Allow vs. disallow CxSR
 *
 * Allow or disallow the system to enter a special CxSR
 * (C-state self refresh) state. What typically happens in CxSR mode
 * is that several display FIFOs may get combined into a single larger
 * FIFO for a particular plane (so called max FIFO mode) to allow the
 * system to defer memory fetches longer, and the memory will enter
 * self refresh.
 *
 * Note that enabling CxSR does not guarantee that the system enters
 * this special mode, nor does it guarantee that the system stays
 * in that mode once entered. So this just allows/disallows the system
 * to autonomously utilize the CxSR mode. Other factors such as core
 * C-states will affect when/if the system actually enters/exits the
 * CxSR mode.
 *
 * Note that on VLV/CHV this actually only controls the max FIFO mode,
 * and the system is free to enter/exit memory self refresh at any time
 * even when the use of CxSR has been disallowed.
 *
 * While the system is actually in the CxSR/max FIFO mode, some plane
 * control registers will not get latched on vblank. Thus in order to
 * guarantee the system will respond to changes in the plane registers
 * we must always disallow CxSR prior to making changes to those registers.
 * Unfortunately the system will re-evaluate the CxSR conditions at
 * frame start which happens after vblank start (which is when the plane
 * registers would get latched), so we can't proceed with the plane update
 * during the same frame where we disallowed CxSR.
 *
 * Certain platforms also have a deeper HPLL SR mode. Fortunately the
 * HPLL SR mode depends on CxSR itself, so we don't have to hand hold
 * the hardware w.r.t. HPLL SR when writing to plane registers.
 * Disallowing just CxSR is sufficient.
 */
bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	bool ret;

	mutex_lock(&dev_priv->wm.wm_mutex);
	ret = _intel_set_memory_cxsr(dev_priv, enable);
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->wm.vlv.cxsr = enable;
	else if (IS_G4X(dev_priv))
		dev_priv->wm.g4x.cxsr = enable;
	mutex_unlock(&dev_priv->wm.wm_mutex);

	return ret;
}
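/*
 * A minimal sketch of the usage pattern implied by the kerneldoc above
 * (hypothetical caller, not a real call site): disallow CxSR before
 * touching plane registers, wait out a frame, then re-allow it:
 *
 *	if (intel_set_memory_cxsr(dev_priv, false))
 *		intel_wait_for_vblank(dev_priv, pipe);
 *	... update plane registers ...
 *	intel_set_memory_cxsr(dev_priv, true);
 */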
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value. It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int pessimal_latency_ns = 5000;
#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
	((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
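/*
 * VLV_FIFO_START() reassembles a 9-bit FIFO split point: bits 7:0 come
 * from a DSPARB field at 'lo_shift' and bit 8 from a DSPARB2 field at
 * 'hi_shift'. As an illustrative decode, dsparb = 0x56 (lo_shift 0)
 * with the dsparb2 bit at hi_shift 0 set yields 0x156 = 342 cachelines.
 */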
static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
	enum pipe pipe = crtc->pipe;
	int sprite0_start, sprite1_start;

	switch (pipe) {
		u32 dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
		break;
	case PIPE_C:
		dsparb2 = I915_READ(DSPARB2);
		dsparb3 = I915_READ(DSPARB3);
		sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
		sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
		break;
	default:
		MISSING_CASE(pipe);
		return;
	}

	fifo_state->plane[PLANE_PRIMARY] = sprite0_start;
	fifo_state->plane[PLANE_SPRITE0] = sprite1_start - sprite0_start;
	fifo_state->plane[PLANE_SPRITE1] = 511 - sprite1_start;
	fifo_state->plane[PLANE_CURSOR] = 63;
}
static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
			      enum i9xx_plane_id i9xx_plane)
{
	u32 dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (i9xx_plane == PLANE_B)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
		    dsparb, plane_name(i9xx_plane), size);

	return size;
}
static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
			      enum i9xx_plane_id i9xx_plane)
{
	u32 dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (i9xx_plane == PLANE_B)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
		    dsparb, plane_name(i9xx_plane), size);

	return size;
}
static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
			      enum i9xx_plane_id i9xx_plane)
{
	u32 dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
		    dsparb, plane_name(i9xx_plane), size);

	return size;
}
/* Pineview has different values for various configs */
static const struct intel_watermark_params pnv_display_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pnv_display_hplloff_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pnv_cursor_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pnv_cursor_hplloff_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i965_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i945_wm_info = {
	.fifo_size = I945_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i915_wm_info = {
	.fifo_size = I915_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i830_a_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i830_bc_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM/2,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i845_wm_info = {
	.fifo_size = I830_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
/**
 * intel_wm_method1 - Method 1 / "small buffer" watermark formula
 * @pixel_rate: Pipe pixel rate in kHz
 * @cpp: Plane bytes per pixel
 * @latency: Memory wakeup latency in 0.1us units
 *
 * Compute the watermark using the method 1 or "small buffer"
 * formula. The caller may additionally add extra cachelines
 * to account for TLB misses and clock crossings.
 *
 * This method is concerned with the short term drain rate
 * of the FIFO, ie. it does not account for blanking periods
 * which would effectively reduce the average drain rate across
 * a longer period. The name "small" refers to the fact the
 * FIFO is relatively small compared to the amount of data
 * fetched.
 *
 * The FIFO level vs. time graph might look something like:
 *
 *   |\   |\
 *   | \  | \
 * __---__---__ (- plane active, _ blanking)
 * -> time
 *
 * or perhaps like this:
 *
 *   |\|\  |\|\
 * __----__----__ (- plane active, _ blanking)
 * -> time
 *
 * Returns:
 * The watermark in bytes
 */
static unsigned int intel_wm_method1(unsigned int pixel_rate,
				     unsigned int cpp,
				     unsigned int latency)
{
	u64 ret;

	ret = mul_u32_u32(pixel_rate, cpp * latency);
	ret = DIV_ROUND_UP_ULL(ret, 10000);

	return ret;
}
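/*
 * Worked example with illustrative 1080p60-style numbers (not from any
 * spec): pixel_rate = 148500 kHz, cpp = 4, latency = 50 (i.e. 5 usec).
 * ret = 148500 * 4 * 50 / 10000 = 2970, so the plane drains 2970 bytes
 * from the FIFO while waiting for memory to wake up.
 */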
/**
 * intel_wm_method2 - Method 2 / "large buffer" watermark formula
 * @pixel_rate: Pipe pixel rate in kHz
 * @htotal: Pipe horizontal total
 * @width: Plane width in pixels
 * @cpp: Plane bytes per pixel
 * @latency: Memory wakeup latency in 0.1us units
 *
 * Compute the watermark using the method 2 or "large buffer"
 * formula. The caller may additionally add extra cachelines
 * to account for TLB misses and clock crossings.
 *
 * This method is concerned with the long term drain rate
 * of the FIFO, ie. it does account for blanking periods
 * which effectively reduce the average drain rate across
 * a longer period. The name "large" refers to the fact the
 * FIFO is relatively large compared to the amount of data
 * fetched.
 *
 * The FIFO level vs. time graph might look something like:
 *
 *    |\___       |\___
 *    |    \___   |    \___
 *    |        \  |        \
 * __ --__--__--__--__--__--__ (- plane active, _ blanking)
 * -> time
 *
 * Returns:
 * The watermark in bytes
 */
static unsigned int intel_wm_method2(unsigned int pixel_rate,
				     unsigned int htotal,
				     unsigned int width,
				     unsigned int cpp,
				     unsigned int latency)
{
	unsigned int ret;

	/*
	 * FIXME remove once all users are computing
	 * watermarks in the correct place.
	 */
	if (WARN_ON_ONCE(htotal == 0))
		htotal = 1;

	ret = (latency * pixel_rate) / (htotal * 10000);
	ret = (ret + 1) * width * cpp;

	return ret;
}
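/*
 * Worked example with the same illustrative timings (pixel_rate =
 * 148500 kHz, htotal = 2200) and a 35 usec latency (latency = 350):
 * (350 * 148500) / (2200 * 10000) = 2 full lines elapse during the
 * latency, so ret = (2 + 1) * 1920 * 4 = 23040 bytes for a 1920 pixel
 * wide 32bpp plane.
 */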
/**
 * intel_calculate_wm - calculate watermark level
 * @pixel_rate: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO buffer
 * @cpp: bytes per pixel
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO line sized chunks from memory until the FIFO fills
 * past the watermark point. If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned int intel_calculate_wm(int pixel_rate,
				       const struct intel_watermark_params *wm,
				       int fifo_size, int cpp,
				       unsigned int latency_ns)
{
	int entries, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	entries = intel_wm_method1(pixel_rate, cpp,
				   latency_ns / 100);
	entries = DIV_ROUND_UP(entries, wm->cacheline_size) +
		wm->guard_size;
	DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries);

	wm_size = fifo_size - entries;
	DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;

	/*
	 * Bspec seems to indicate that the value shouldn't be lower than
	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
	 * Let's go for 8 which is the burst size since certain platforms
	 * already use a hardcoded 8 (which is what the spec says should be
	 * done).
	 */
	if (wm_size <= 8)
		wm_size = 8;

	return wm_size;
}
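/*
 * Worked example (hypothetical 64 byte cacheline, guard_size = 2 and
 * fifo_size = 96): with pixel_rate = 148500, cpp = 4 and latency_ns =
 * 5000, method 1 yields 2970 bytes, i.e. DIV_ROUND_UP(2970, 64) + 2 =
 * 49 entries, so the watermark is programmed to 96 - 49 = 47 entries.
 */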
static bool is_disabling(int old, int new, int threshold)
{
	return old >= threshold && new < threshold;
}

static bool is_enabling(int old, int new, int threshold)
{
	return old < threshold && new >= threshold;
}
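/*
 * With threshold == true these helpers detect CxSR on/off transitions:
 * is_disabling(old_wm->cxsr, new_wm.cxsr, true) fires only when CxSR
 * goes from enabled to disabled, and is_enabling() the reverse; see
 * g4x_program_watermarks() below for the usage.
 */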
static int intel_wm_num_levels(struct drm_i915_private *dev_priv)
{
	return dev_priv->wm.max_level + 1;
}
static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
				   const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);

	/* FIXME check the 'enable' instead */
	if (!crtc_state->hw.active)
		return false;

	/*
	 * Treat cursor with fb as always visible since cursor updates
	 * can happen faster than the vrefresh rate, and the current
	 * watermark code doesn't handle that correctly. Cursor updates
	 * which set/clear the fb or change the cursor size are going
	 * to get throttled by intel_legacy_cursor_update() to work
	 * around this problem with the watermark code.
	 */
	if (plane->id == PLANE_CURSOR)
		return plane_state->hw.fb != NULL;
	else
		return plane_state->uapi.visible;
}
static bool intel_crtc_active(struct intel_crtc *crtc)
{
	/* Be paranoid as we can arrive here with only partial
	 * state retrieved from the hardware during setup.
	 *
	 * We can ditch the adjusted_mode.crtc_clock check as soon
	 * as Haswell has gained clock readout/fastboot support.
	 *
	 * We can ditch the crtc->primary->state->fb check as soon as we can
	 * properly reconstruct framebuffers.
	 *
	 * FIXME: The intel_crtc->active here should be switched to
	 * crtc->state->active once we have proper CRTC states wired up
	 * for atomic.
	 */
	return crtc->active && crtc->base.primary->state->fb &&
		crtc->config->hw.adjusted_mode.crtc_clock;
}
static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc, *enabled = NULL;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}
static void pnv_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	struct intel_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned int wm;

	latency = intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
					 dev_priv->is_ddr3,
					 dev_priv->fsb_freq,
					 dev_priv->mem_freq);
	if (!latency) {
		drm_dbg_kms(&dev_priv->drm,
			    "Unknown FSB/MEM found, disable CxSR\n");
		intel_set_memory_cxsr(dev_priv, false);
		return;
	}

	crtc = single_enabled_crtc(dev_priv);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->hw.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int cpp = fb->format->cpp[0];
		int clock = adjusted_mode->crtc_clock;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pnv_display_wm,
					pnv_display_wm.fifo_size,
					cpp, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= FW_WM(wm, SR);
		I915_WRITE(DSPFW1, reg);
		drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pnv_cursor_wm,
					pnv_display_wm.fifo_size,
					4, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= FW_WM(wm, CURSOR_SR);
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pnv_display_hplloff_wm,
					pnv_display_hplloff_wm.fifo_size,
					cpp, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= FW_WM(wm, HPLL_SR);
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pnv_cursor_hplloff_wm,
					pnv_display_hplloff_wm.fifo_size,
					4, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= FW_WM(wm, HPLL_CURSOR);
		I915_WRITE(DSPFW3, reg);
		drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg);

		intel_set_memory_cxsr(dev_priv, true);
	} else {
		intel_set_memory_cxsr(dev_priv, false);
	}
}
/*
 * Documentation says:
 * "If the line size is small, the TLB fetches can get in the way of the
 *  data fetches, causing some lag in the pixel data return which is not
 *  accounted for in the above formulas. The following adjustment only
 *  needs to be applied if eight whole lines fit in the buffer at once.
 *  The WM is adjusted upwards by the difference between the FIFO size
 *  and the size of 8 whole lines. This adjustment is always performed
 *  in the actual pixel depth regardless of whether FBC is enabled or not."
 */
static unsigned int g4x_tlb_miss_wa(int fifo_size, int width, int cpp)
{
	int tlb_miss = fifo_size * 64 - width * cpp * 8;

	return max(0, tlb_miss);
}
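/*
 * Illustrative numbers: with a 127 cacheline (8128 byte) FIFO and a
 * 240 pixel wide 32bpp plane, eight whole lines are 240 * 4 * 8 = 7680
 * bytes, so the WM grows by 8128 - 7680 = 448 bytes. A 1920 pixel wide
 * plane makes the difference negative and max(0, tlb_miss) leaves the
 * watermark untouched.
 */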
static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
				const struct g4x_wm_values *wm)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		trace_g4x_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);

	I915_WRITE(DSPFW1,
		   FW_WM(wm->sr.plane, SR) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
	I915_WRITE(DSPFW2,
		   (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
		   FW_WM(wm->sr.fbc, FBC_SR) |
		   FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
	I915_WRITE(DSPFW3,
		   (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
		   FW_WM(wm->sr.cursor, CURSOR_SR) |
		   FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
		   FW_WM(wm->hpll.plane, HPLL_SR));

	POSTING_READ(DSPFW1);
}
#define FW_WM_VLV(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)
static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
				const struct vlv_wm_values *wm)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		trace_vlv_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);

		I915_WRITE(VLV_DDL(pipe),
			   (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
			   (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
			   (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
			   (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
	}

	/*
	 * Zero the (unused) WM1 watermarks, and also clear all the
	 * high order bits so that there are no out of bounds values
	 * present in the registers during the reprogramming.
	 */
	I915_WRITE(DSPHOWM, 0);
	I915_WRITE(DSPHOWM1, 0);
	I915_WRITE(DSPFW4, 0);
	I915_WRITE(DSPFW5, 0);
	I915_WRITE(DSPFW6, 0);

	I915_WRITE(DSPFW1,
		   FW_WM(wm->sr.plane, SR) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
		   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
	I915_WRITE(DSPFW2,
		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
	I915_WRITE(DSPFW3,
		   FW_WM(wm->sr.cursor, CURSOR_SR));

	if (IS_CHERRYVIEW(dev_priv)) {
		I915_WRITE(DSPFW7_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
		I915_WRITE(DSPFW8_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
		I915_WRITE(DSPFW9_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
	} else {
		I915_WRITE(DSPFW7,
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
	}

	POSTING_READ(DSPFW1);
}

#undef FW_WM_VLV
static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	/* all latencies in usec */
	dev_priv->wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
	dev_priv->wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
	dev_priv->wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;

	dev_priv->wm.max_level = G4X_WM_LEVEL_HPLL;
}
static int g4x_plane_fifo_size(enum plane_id plane_id, int level)
{
	/*
	 * DSPCNTR[13] supposedly controls whether the
	 * primary plane can use the FIFO space otherwise
	 * reserved for the sprite plane. It's not 100% clear
	 * what the actual FIFO size is, but it looks like we
	 * can happily set both primary and sprite watermarks
	 * up to 127 cachelines. So that would seem to mean
	 * that either DSPCNTR[13] doesn't do anything, or that
	 * the total FIFO is >= 256 cachelines in size. Either
	 * way, we don't seem to have to worry about this
	 * repartitioning as the maximum watermark value the
	 * register can hold for each plane is lower than the
	 * minimum FIFO size.
	 */
	switch (plane_id) {
	case PLANE_CURSOR:
		return 63;
	case PLANE_PRIMARY:
		return level == G4X_WM_LEVEL_NORMAL ? 127 : 511;
	case PLANE_SPRITE0:
		return level == G4X_WM_LEVEL_NORMAL ? 127 : 0;
	default:
		MISSING_CASE(plane_id);
		return 0;
	}
}
static int g4x_fbc_fifo_size(int level)
{
	switch (level) {
	case G4X_WM_LEVEL_SR:
		return 7;
	case G4X_WM_LEVEL_HPLL:
		return 15;
	default:
		MISSING_CASE(level);
		return 0;
	}
}
static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
			  const struct intel_plane_state *plane_state,
			  int level)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	unsigned int latency = dev_priv->wm.pri_latency[level] * 10;
	unsigned int clock, htotal, cpp, width, wm;

	if (latency == 0)
		return USHRT_MAX;

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	cpp = plane_state->hw.fb->format->cpp[0];

	/*
	 * Not 100% sure which way ELK should go here as the
	 * spec only says CL/CTG should assume 32bpp and BW
	 * doesn't need to. But as these things followed the
	 * mobile vs. desktop lines on gen3 as well, let's
	 * assume ELK doesn't need this.
	 *
	 * The spec also fails to list such a restriction for
	 * the HPLL watermark, which seems a little strange.
	 * Let's use 32bpp for the HPLL watermark as well.
	 */
	if (IS_GM45(dev_priv) && plane->id == PLANE_PRIMARY &&
	    level != G4X_WM_LEVEL_NORMAL)
		cpp = max(cpp, 4u);

	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;

	width = drm_rect_width(&plane_state->uapi.dst);

	if (plane->id == PLANE_CURSOR) {
		wm = intel_wm_method2(clock, htotal, width, cpp, latency);
	} else if (plane->id == PLANE_PRIMARY &&
		   level == G4X_WM_LEVEL_NORMAL) {
		wm = intel_wm_method1(clock, cpp, latency);
	} else {
		unsigned int small, large;

		small = intel_wm_method1(clock, cpp, latency);
		large = intel_wm_method2(clock, htotal, width, cpp, latency);

		wm = min(small, large);
	}

	wm += g4x_tlb_miss_wa(g4x_plane_fifo_size(plane->id, level),
			      width, cpp);

	wm = DIV_ROUND_UP(wm, 64) + 2;

	return min_t(unsigned int, wm, USHRT_MAX);
}
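/*
 * The final DIV_ROUND_UP(wm, 64) + 2 converts the byte result of the
 * method 1/2 formulas into 64 byte cachelines plus a two cacheline
 * guard; e.g. the 2970 byte method 1 example above becomes
 * DIV_ROUND_UP(2970, 64) + 2 = 49 cachelines.
 */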
static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
				 int level, enum plane_id plane_id, u16 value)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	bool dirty = false;

	for (; level < intel_wm_num_levels(dev_priv); level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];

		dirty |= raw->plane[plane_id] != value;
		raw->plane[plane_id] = value;
	}

	return dirty;
}
static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
			       int level, u16 value)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	bool dirty = false;

	/* NORMAL level doesn't have an FBC watermark */
	level = max(level, G4X_WM_LEVEL_SR);

	for (; level < intel_wm_num_levels(dev_priv); level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];

		dirty |= raw->fbc != value;
		raw->fbc = value;
	}

	return dirty;
}
static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state,
			      u32 pri_val);
static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
				     const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
	enum plane_id plane_id = plane->id;
	bool dirty = false;
	int level;

	if (!intel_wm_plane_visible(crtc_state, plane_state)) {
		dirty |= g4x_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
		if (plane_id == PLANE_PRIMARY)
			dirty |= g4x_raw_fbc_wm_set(crtc_state, 0, 0);
		goto out;
	}

	for (level = 0; level < num_levels; level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
		int wm, max_wm;

		wm = g4x_compute_wm(crtc_state, plane_state, level);
		max_wm = g4x_plane_fifo_size(plane_id, level);

		if (wm > max_wm)
			break;

		dirty |= raw->plane[plane_id] != wm;
		raw->plane[plane_id] = wm;

		if (plane_id != PLANE_PRIMARY ||
		    level == G4X_WM_LEVEL_NORMAL)
			continue;

		wm = ilk_compute_fbc_wm(crtc_state, plane_state,
					raw->plane[plane_id]);
		max_wm = g4x_fbc_fifo_size(level);

		/*
		 * FBC wm is not mandatory as we
		 * can always just disable its use.
		 */
		if (wm > max_wm)
			wm = USHRT_MAX;

		dirty |= raw->fbc != wm;
		raw->fbc = wm;
	}

	/* mark watermarks as invalid */
	dirty |= g4x_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);

	if (plane_id == PLANE_PRIMARY)
		dirty |= g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);

 out:
	if (dirty) {
		drm_dbg_kms(&dev_priv->drm,
			    "%s watermarks: normal=%d, SR=%d, HPLL=%d\n",
			    plane->base.name,
			    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id],
			    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id],
			    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]);

		if (plane_id == PLANE_PRIMARY)
			drm_dbg_kms(&dev_priv->drm,
				    "FBC watermarks: SR=%d, HPLL=%d\n",
				    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc,
				    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc);
	}

	return dirty;
}
static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
				      enum plane_id plane_id, int level)
{
	const struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];

	return raw->plane[plane_id] <= g4x_plane_fifo_size(plane_id, level);
}
static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
				     int level)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	if (level > dev_priv->wm.max_level)
		return false;

	return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
		g4x_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
		g4x_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
}
/* mark all levels starting from 'level' as invalid */
static void g4x_invalidate_wms(struct intel_crtc *crtc,
			       struct g4x_wm_state *wm_state, int level)
{
	if (level <= G4X_WM_LEVEL_NORMAL) {
		enum plane_id plane_id;

		for_each_plane_id_on_crtc(crtc, plane_id)
			wm_state->wm.plane[plane_id] = USHRT_MAX;
	}

	if (level <= G4X_WM_LEVEL_SR) {
		wm_state->cxsr = false;
		wm_state->sr.cursor = USHRT_MAX;
		wm_state->sr.plane = USHRT_MAX;
		wm_state->sr.fbc = USHRT_MAX;
	}

	if (level <= G4X_WM_LEVEL_HPLL) {
		wm_state->hpll_en = false;
		wm_state->hpll.cursor = USHRT_MAX;
		wm_state->hpll.plane = USHRT_MAX;
		wm_state->hpll.fbc = USHRT_MAX;
	}
}
static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->uapi.state);
	struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
	int num_active_planes = hweight8(crtc_state->active_planes &
					 ~BIT(PLANE_CURSOR));
	const struct g4x_pipe_wm *raw;
	const struct intel_plane_state *old_plane_state;
	const struct intel_plane_state *new_plane_state;
	struct intel_plane *plane;
	enum plane_id plane_id;
	int i, level;
	unsigned int dirty = 0;

	for_each_oldnew_intel_plane_in_state(state, plane,
					     old_plane_state,
					     new_plane_state, i) {
		if (new_plane_state->hw.crtc != &crtc->base &&
		    old_plane_state->hw.crtc != &crtc->base)
			continue;

		if (g4x_raw_plane_wm_compute(crtc_state, new_plane_state))
			dirty |= BIT(plane->id);
	}

	if (!dirty)
		return 0;

	level = G4X_WM_LEVEL_NORMAL;
	if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
		goto out;

	raw = &crtc_state->wm.g4x.raw[level];
	for_each_plane_id_on_crtc(crtc, plane_id)
		wm_state->wm.plane[plane_id] = raw->plane[plane_id];

	level = G4X_WM_LEVEL_SR;
	if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
		goto out;

	raw = &crtc_state->wm.g4x.raw[level];
	wm_state->sr.plane = raw->plane[PLANE_PRIMARY];
	wm_state->sr.cursor = raw->plane[PLANE_CURSOR];
	wm_state->sr.fbc = raw->fbc;

	wm_state->cxsr = num_active_planes == BIT(PLANE_PRIMARY);

	level = G4X_WM_LEVEL_HPLL;
	if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
		goto out;

	raw = &crtc_state->wm.g4x.raw[level];
	wm_state->hpll.plane = raw->plane[PLANE_PRIMARY];
	wm_state->hpll.cursor = raw->plane[PLANE_CURSOR];
	wm_state->hpll.fbc = raw->fbc;

	wm_state->hpll_en = wm_state->cxsr;

	level++;

 out:
	if (level == G4X_WM_LEVEL_NORMAL)
		return -EINVAL;

	/* invalidate the higher levels */
	g4x_invalidate_wms(crtc, wm_state, level);

	/*
	 * Determine if the FBC watermark(s) can be used. If
	 * this isn't the case we prefer to disable the FBC
	 * watermark(s) rather than disable the SR/HPLL
	 * level(s) entirely.
	 */
	wm_state->fbc_en = level > G4X_WM_LEVEL_NORMAL;

	if (level >= G4X_WM_LEVEL_SR &&
	    wm_state->sr.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_SR))
		wm_state->fbc_en = false;
	else if (level >= G4X_WM_LEVEL_HPLL &&
		 wm_state->hpll.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_HPLL))
		wm_state->fbc_en = false;

	return 0;
}
static int g4x_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate;
	const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal;
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(new_crtc_state->uapi.state);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(intel_state, crtc);
	const struct g4x_wm_state *active = &old_crtc_state->wm.g4x.optimal;
	enum plane_id plane_id;

	if (!new_crtc_state->hw.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) {
		*intermediate = *optimal;

		intermediate->cxsr = false;
		intermediate->hpll_en = false;
		goto out;
	}

	intermediate->cxsr = optimal->cxsr && active->cxsr &&
		!new_crtc_state->disable_cxsr;
	intermediate->hpll_en = optimal->hpll_en && active->hpll_en &&
		!new_crtc_state->disable_cxsr;
	intermediate->fbc_en = optimal->fbc_en && active->fbc_en;

	for_each_plane_id_on_crtc(crtc, plane_id) {
		intermediate->wm.plane[plane_id] =
			max(optimal->wm.plane[plane_id],
			    active->wm.plane[plane_id]);

		WARN_ON(intermediate->wm.plane[plane_id] >
			g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL));
	}

	intermediate->sr.plane = max(optimal->sr.plane,
				     active->sr.plane);
	intermediate->sr.cursor = max(optimal->sr.cursor,
				      active->sr.cursor);
	intermediate->sr.fbc = max(optimal->sr.fbc,
				   active->sr.fbc);

	intermediate->hpll.plane = max(optimal->hpll.plane,
				       active->hpll.plane);
	intermediate->hpll.cursor = max(optimal->hpll.cursor,
					active->hpll.cursor);
	intermediate->hpll.fbc = max(optimal->hpll.fbc,
				     active->hpll.fbc);

	WARN_ON((intermediate->sr.plane >
		 g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) ||
		 intermediate->sr.cursor >
		 g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) &&
		intermediate->cxsr);
	WARN_ON((intermediate->sr.plane >
		 g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) ||
		 intermediate->sr.cursor >
		 g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) &&
		intermediate->hpll_en);

	WARN_ON(intermediate->sr.fbc > g4x_fbc_fifo_size(1) &&
		intermediate->fbc_en && intermediate->cxsr);
	WARN_ON(intermediate->hpll.fbc > g4x_fbc_fifo_size(2) &&
		intermediate->fbc_en && intermediate->hpll_en);

 out:
	/*
	 * If our intermediate WM are identical to the final WM, then we can
	 * omit the post-vblank programming; only update if it's different.
	 */
	if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
		new_crtc_state->wm.need_postvbl_update = true;

	return 0;
}
static void g4x_merge_wm(struct drm_i915_private *dev_priv,
			 struct g4x_wm_values *wm)
{
	struct intel_crtc *crtc;
	int num_active_pipes = 0;

	wm->cxsr = true;
	wm->hpll_en = true;
	wm->fbc_en = true;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;

		if (!crtc->active)
			continue;

		if (!wm_state->cxsr)
			wm->cxsr = false;
		if (!wm_state->hpll_en)
			wm->hpll_en = false;
		if (!wm_state->fbc_en)
			wm->fbc_en = false;

		num_active_pipes++;
	}

	if (num_active_pipes != 1) {
		wm->cxsr = false;
		wm->hpll_en = false;
		wm->fbc_en = false;
	}

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
		enum pipe pipe = crtc->pipe;

		wm->pipe[pipe] = wm_state->wm;
		if (crtc->active && wm->cxsr)
			wm->sr = wm_state->sr;
		if (crtc->active && wm->hpll_en)
			wm->hpll = wm_state->hpll;
	}
}
static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
{
	struct g4x_wm_values *old_wm = &dev_priv->wm.g4x;
	struct g4x_wm_values new_wm = {};

	g4x_merge_wm(dev_priv, &new_wm);

	if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
		return;

	if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
		_intel_set_memory_cxsr(dev_priv, false);

	g4x_write_wm_values(dev_priv, &new_wm);

	if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
		_intel_set_memory_cxsr(dev_priv, true);

	*old_wm = new_wm;
}
static void g4x_initial_watermarks(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	mutex_lock(&dev_priv->wm.wm_mutex);
	crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate;
	g4x_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->wm.wm_mutex);
}
static void g4x_optimize_watermarks(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!crtc_state->wm.need_postvbl_update)
		return;

	mutex_lock(&dev_priv->wm.wm_mutex);
	crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
	g4x_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->wm.wm_mutex);
}
/* latency must be in 0.1us units. */
static unsigned int vlv_wm_method2(unsigned int pixel_rate,
				   unsigned int htotal,
				   unsigned int width,
				   unsigned int cpp,
				   unsigned int latency)
{
	unsigned int ret;

	ret = intel_wm_method2(pixel_rate, htotal,
			       width, cpp, latency);
	ret = DIV_ROUND_UP(ret, 64);

	return ret;
}
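/*
 * Same method 2 formula as above, just rounded up into 64 byte
 * cachelines, e.g. the 23040 byte example from intel_wm_method2()
 * becomes DIV_ROUND_UP(23040, 64) = 360 cachelines.
 */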
static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	/* all latencies in usec */
	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;

	dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;

		dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
	}
}
static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
				const struct intel_plane_state *plane_state,
				int level)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	unsigned int clock, htotal, cpp, width, wm;

	if (dev_priv->wm.pri_latency[level] == 0)
		return USHRT_MAX;

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	cpp = plane_state->hw.fb->format->cpp[0];
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	width = crtc_state->pipe_src_w;

	if (plane->id == PLANE_CURSOR) {
		/*
		 * FIXME the formula gives values that are
		 * too big for the cursor FIFO, and hence we
		 * would never be able to use cursors. For
		 * now just hardcode the watermark.
		 */
		wm = 63;
	} else {
		wm = vlv_wm_method2(clock, htotal, width, cpp,
				    dev_priv->wm.pri_latency[level] * 10);
	}

	return min_t(unsigned int, wm, USHRT_MAX);
}
static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
{
	return (active_planes & (BIT(PLANE_SPRITE0) |
				 BIT(PLANE_SPRITE1))) == BIT(PLANE_SPRITE1);
}
static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	const struct g4x_pipe_wm *raw =
		&crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
	struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
	unsigned int active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
	int num_active_planes = hweight8(active_planes);
	const int fifo_size = 511;
	int fifo_extra, fifo_left = fifo_size;
	int sprite0_fifo_extra = 0;
	unsigned int total_rate;
	enum plane_id plane_id;

	/*
	 * When enabling sprite0 after sprite1 has already been enabled
	 * we tend to get an underrun unless sprite0 already has some
	 * FIFO space allocated. Hence we always allocate at least one
	 * cacheline for sprite0 whenever sprite1 is enabled.
	 *
	 * All other plane enable sequences appear immune to this problem.
	 */
	if (vlv_need_sprite0_fifo_workaround(active_planes))
		sprite0_fifo_extra = 1;

	total_rate = raw->plane[PLANE_PRIMARY] +
		raw->plane[PLANE_SPRITE0] +
		raw->plane[PLANE_SPRITE1] +
		sprite0_fifo_extra;

	if (total_rate > fifo_size)
		return -EINVAL;

	if (total_rate == 0)
		total_rate = 1;

	for_each_plane_id_on_crtc(crtc, plane_id) {
		unsigned int rate;

		if ((active_planes & BIT(plane_id)) == 0) {
			fifo_state->plane[plane_id] = 0;
			continue;
		}

		rate = raw->plane[plane_id];
		fifo_state->plane[plane_id] = fifo_size * rate / total_rate;
		fifo_left -= fifo_state->plane[plane_id];
	}

	fifo_state->plane[PLANE_SPRITE0] += sprite0_fifo_extra;
	fifo_left -= sprite0_fifo_extra;

	fifo_state->plane[PLANE_CURSOR] = 63;

	fifo_extra = DIV_ROUND_UP(fifo_left, num_active_planes ?: 1);

	/* spread the remainder evenly */
	for_each_plane_id_on_crtc(crtc, plane_id) {
		int plane_extra;

		if (fifo_left == 0)
			break;

		if ((active_planes & BIT(plane_id)) == 0)
			continue;

		plane_extra = min(fifo_extra, fifo_left);
		fifo_state->plane[plane_id] += plane_extra;
		fifo_left -= plane_extra;
	}

	WARN_ON(active_planes != 0 && fifo_left != 0);

	/* give it all to the first plane if none are active */
	if (active_planes == 0) {
		WARN_ON(fifo_left != fifo_size);
		fifo_state->plane[PLANE_PRIMARY] = fifo_left;
	}

	return 0;
}
/* mark all levels starting from 'level' as invalid */
static void vlv_invalidate_wms(struct intel_crtc *crtc,
			       struct vlv_wm_state *wm_state, int level)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	for (; level < intel_wm_num_levels(dev_priv); level++) {
		enum plane_id plane_id;

		for_each_plane_id_on_crtc(crtc, plane_id)
			wm_state->wm[level].plane[plane_id] = USHRT_MAX;

		wm_state->sr[level].cursor = USHRT_MAX;
		wm_state->sr[level].plane = USHRT_MAX;
	}
}
static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
{
	if (wm > fifo_size)
		return USHRT_MAX;
	else
		return fifo_size - wm;
}
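/*
 * The DSPFW registers want "free FIFO entries when the plane starts
 * fetching" rather than "entries consumed", hence the inversion: with
 * a 170 cacheline FIFO share and a raw watermark of 50 this programs
 * 170 - 50 = 120, and a raw value that exceeds the FIFO share maps to
 * USHRT_MAX so the level is treated as unusable.
 */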
/*
 * Starting from 'level' set all higher
 * levels to 'value' in the "raw" watermarks.
 */
static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
				 int level, enum plane_id plane_id, u16 value)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	int num_levels = intel_wm_num_levels(dev_priv);
	bool dirty = false;

	for (; level < num_levels; level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];

		dirty |= raw->plane[plane_id] != value;
		raw->plane[plane_id] = value;
	}

	return dirty;
}
static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
				     const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	enum plane_id plane_id = plane->id;
	int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
	int level;
	bool dirty = false;

	if (!intel_wm_plane_visible(crtc_state, plane_state)) {
		dirty |= vlv_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
		goto out;
	}

	for (level = 0; level < num_levels; level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
		int wm = vlv_compute_wm_level(crtc_state, plane_state, level);
		int max_wm = plane_id == PLANE_CURSOR ? 63 : 511;

		if (wm > max_wm)
			break;

		dirty |= raw->plane[plane_id] != wm;
		raw->plane[plane_id] = wm;
	}

	/* mark all higher levels as invalid */
	dirty |= vlv_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);

 out:
	if (dirty)
		drm_dbg_kms(&dev_priv->drm,
			    "%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n",
			    plane->base.name,
			    crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id],
			    crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id],
			    crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]);

	return dirty;
}
static bool vlv_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
				      enum plane_id plane_id, int level)
{
	const struct g4x_pipe_wm *raw =
		&crtc_state->wm.vlv.raw[level];
	const struct vlv_fifo_state *fifo_state =
		&crtc_state->wm.vlv.fifo_state;

	return raw->plane[plane_id] <= fifo_state->plane[plane_id];
}
static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, int level)
{
	return vlv_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
		vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
		vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE1, level) &&
		vlv_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
}
static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->uapi.state);
	struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
	const struct vlv_fifo_state *fifo_state =
		&crtc_state->wm.vlv.fifo_state;
	int num_active_planes = hweight8(crtc_state->active_planes &
					 ~BIT(PLANE_CURSOR));
	bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->uapi);
	const struct intel_plane_state *old_plane_state;
	const struct intel_plane_state *new_plane_state;
	struct intel_plane *plane;
	enum plane_id plane_id;
	int level, ret, i;
	unsigned int dirty = 0;

	for_each_oldnew_intel_plane_in_state(state, plane,
					     old_plane_state,
					     new_plane_state, i) {
		if (new_plane_state->hw.crtc != &crtc->base &&
		    old_plane_state->hw.crtc != &crtc->base)
			continue;

		if (vlv_raw_plane_wm_compute(crtc_state, new_plane_state))
			dirty |= BIT(plane->id);
	}

	/*
	 * DSPARB registers may have been reset due to the
	 * power well being turned off. Make sure we restore
	 * them to a consistent state even if no primary/sprite
	 * planes are initially active.
	 */
	if (needs_modeset)
		crtc_state->fifo_changed = true;

	if (!dirty)
		return 0;

	/* cursor changes don't warrant a FIFO recompute */
	if (dirty & ~BIT(PLANE_CURSOR)) {
		const struct intel_crtc_state *old_crtc_state =
			intel_atomic_get_old_crtc_state(state, crtc);
		const struct vlv_fifo_state *old_fifo_state =
			&old_crtc_state->wm.vlv.fifo_state;

		ret = vlv_compute_fifo(crtc_state);
		if (ret)
			return ret;

		if (needs_modeset ||
		    memcmp(old_fifo_state, fifo_state,
			   sizeof(*fifo_state)) != 0)
			crtc_state->fifo_changed = true;
	}

	/* initially allow all levels */
	wm_state->num_levels = intel_wm_num_levels(dev_priv);
	/*
	 * Note that enabling cxsr with no primary/sprite planes
	 * enabled can wedge the pipe. Hence we only allow cxsr
	 * with exactly one enabled primary/sprite plane.
	 */
	wm_state->cxsr = crtc->pipe != PIPE_C && num_active_planes == 1;

	for (level = 0; level < wm_state->num_levels; level++) {
		const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
		const int sr_fifo_size = INTEL_NUM_PIPES(dev_priv) * 512 - 1;

		if (!vlv_raw_crtc_wm_is_valid(crtc_state, level))
			break;

		for_each_plane_id_on_crtc(crtc, plane_id) {
			wm_state->wm[level].plane[plane_id] =
				vlv_invert_wm_value(raw->plane[plane_id],
						    fifo_state->plane[plane_id]);
		}

		wm_state->sr[level].plane =
			vlv_invert_wm_value(max3(raw->plane[PLANE_PRIMARY],
						 raw->plane[PLANE_SPRITE0],
						 raw->plane[PLANE_SPRITE1]),
					    sr_fifo_size);

		wm_state->sr[level].cursor =
			vlv_invert_wm_value(raw->plane[PLANE_CURSOR],
					    63);
	}

	if (level == 0)
		return -EINVAL;

	/* limit to only levels we can actually handle */
	wm_state->num_levels = level;

	/* invalidate the higher levels */
	vlv_invalidate_wms(crtc, wm_state, level);

	return 0;
}
#define VLV_FIFO(plane, value) \
	(((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
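/*
 * Illustrative expansion of the macro above (not additional driver code):
 * VLV_FIFO(SPRITEA, sprite0_start) token-pastes into
 * ((sprite0_start << DSPARB_SPRITEA_SHIFT_VLV) & DSPARB_SPRITEA_MASK_VLV),
 * i.e. the value is shifted into the SPRITEA register field and masked
 * so it cannot spill into neighbouring fields.
 */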
static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_uncore *uncore = &dev_priv->uncore;
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct vlv_fifo_state *fifo_state =
		&crtc_state->wm.vlv.fifo_state;
	int sprite0_start, sprite1_start, fifo_size;

	if (!crtc_state->fifo_changed)
		return;

	sprite0_start = fifo_state->plane[PLANE_PRIMARY];
	sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start;
	fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start;

	WARN_ON(fifo_state->plane[PLANE_CURSOR] != 63);
	WARN_ON(fifo_size != 511);

	trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size);

	/*
	 * uncore.lock serves a double purpose here. It allows us to
	 * use the less expensive I915_{READ,WRITE}_FW() functions, and
	 * it protects the DSPARB registers from getting clobbered by
	 * parallel updates from multiple pipes.
	 *
	 * intel_pipe_update_start() has already disabled interrupts
	 * for us, so a plain spin_lock() is sufficient here.
	 */
	spin_lock(&uncore->lock);

	switch (crtc->pipe) {
		u32 dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = intel_uncore_read_fw(uncore, DSPARB);
		dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
			    VLV_FIFO(SPRITEB, 0xff));
		dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
			   VLV_FIFO(SPRITEB, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
			     VLV_FIFO(SPRITEB_HI, 0x1));
		dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));

		intel_uncore_write_fw(uncore, DSPARB, dsparb);
		intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
		break;
	case PIPE_B:
		dsparb = intel_uncore_read_fw(uncore, DSPARB);
		dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
			    VLV_FIFO(SPRITED, 0xff));
		dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
			   VLV_FIFO(SPRITED, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
			     VLV_FIFO(SPRITED_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITED_HI, sprite1_start >> 8));

		intel_uncore_write_fw(uncore, DSPARB, dsparb);
		intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
		break;
	case PIPE_C:
		dsparb3 = intel_uncore_read_fw(uncore, DSPARB3);
		dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);

		dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
			     VLV_FIFO(SPRITEF, 0xff));
		dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
			    VLV_FIFO(SPRITEF, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
			     VLV_FIFO(SPRITEF_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));

		intel_uncore_write_fw(uncore, DSPARB3, dsparb3);
		intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
		break;
	default:
		break;
	}

	intel_uncore_posting_read_fw(uncore, DSPARB);

	spin_unlock(&uncore->lock);
}
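/*
 * Worked example for the FIFO split above (hypothetical allocation, not
 * taken from any particular machine): with fifo_state->plane[] holding
 * PRIMARY = 256, SPRITE0 = 128, SPRITE1 = 127 and CURSOR = 63, we get
 * sprite0_start = 256, sprite1_start = 384 and fifo_size = 511, which
 * satisfies both WARN_ON checks in vlv_atomic_update_fifo().
 */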
static int vlv_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate;
	const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal;
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(new_crtc_state->uapi.state);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(intel_state, crtc);
	const struct vlv_wm_state *active = &old_crtc_state->wm.vlv.optimal;
	int level;

	if (!new_crtc_state->hw.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) {
		*intermediate = *optimal;

		intermediate->cxsr = false;
		goto out;
	}

	intermediate->num_levels = min(optimal->num_levels, active->num_levels);
	intermediate->cxsr = optimal->cxsr && active->cxsr &&
		!new_crtc_state->disable_cxsr;

	for (level = 0; level < intermediate->num_levels; level++) {
		enum plane_id plane_id;

		for_each_plane_id_on_crtc(crtc, plane_id) {
			intermediate->wm[level].plane[plane_id] =
				min(optimal->wm[level].plane[plane_id],
				    active->wm[level].plane[plane_id]);
		}

		intermediate->sr[level].plane = min(optimal->sr[level].plane,
						    active->sr[level].plane);
		intermediate->sr[level].cursor = min(optimal->sr[level].cursor,
						     active->sr[level].cursor);
	}

	vlv_invalidate_wms(crtc, intermediate, level);

out:
	/*
	 * If our intermediate WM are identical to the final WM, then we can
	 * omit the post-vblank programming; only update if it's different.
	 */
	if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
		new_crtc_state->wm.need_postvbl_update = true;

	return 0;
}
static void vlv_merge_wm(struct drm_i915_private *dev_priv,
			 struct vlv_wm_values *wm)
{
	struct intel_crtc *crtc;
	int num_active_pipes = 0;

	wm->level = dev_priv->wm.max_level;
	wm->cxsr = true;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;

		if (!crtc->active)
			continue;

		if (!wm_state->cxsr)
			wm->cxsr = false;

		num_active_pipes++;
		wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
	}

	if (num_active_pipes != 1)
		wm->cxsr = false;

	if (num_active_pipes > 1)
		wm->level = VLV_WM_LEVEL_PM2;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
		enum pipe pipe = crtc->pipe;

		wm->pipe[pipe] = wm_state->wm[wm->level];
		if (crtc->active && wm->cxsr)
			wm->sr = wm_state->sr[wm->level];

		wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].plane[PLANE_SPRITE0] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].plane[PLANE_SPRITE1] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].plane[PLANE_CURSOR] = DDL_PRECISION_HIGH | 2;
	}
}
static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
{
	struct vlv_wm_values *old_wm = &dev_priv->wm.vlv;
	struct vlv_wm_values new_wm = {};

	vlv_merge_wm(dev_priv, &new_wm);

	if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
		return;

	if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
		chv_set_memory_dvfs(dev_priv, false);

	if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
		chv_set_memory_pm5(dev_priv, false);

	if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
		_intel_set_memory_cxsr(dev_priv, false);

	vlv_write_wm_values(dev_priv, &new_wm);

	if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
		_intel_set_memory_cxsr(dev_priv, true);

	if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
		chv_set_memory_pm5(dev_priv, true);

	if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
		chv_set_memory_dvfs(dev_priv, true);

	*old_wm = new_wm;
}

static void vlv_initial_watermarks(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	mutex_lock(&dev_priv->wm.wm_mutex);
	crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate;
	vlv_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->wm.wm_mutex);
}

static void vlv_optimize_watermarks(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!crtc_state->wm.need_postvbl_update)
		return;

	mutex_lock(&dev_priv->wm.wm_mutex);
	crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
	vlv_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->wm.wm_mutex);
}
static void i965_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	struct intel_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;
	bool cxsr_enabled;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev_priv);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->hw.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = crtc->config->pipe_src_w;
		int cpp = fb->format->cpp[0];
		int entries;

		entries = intel_wm_method2(clock, htotal,
					   hdisplay, cpp, sr_latency_ns / 100);
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		drm_dbg_kms(&dev_priv->drm,
			    "self-refresh entries: %d, wm: %d\n",
			    entries, srwm);

		entries = intel_wm_method2(clock, htotal,
					   crtc->base.cursor->state->crtc_w, 4,
					   sr_latency_ns / 100);
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size) +
			i965_cursor_wm_info.guard_size;

		cursor_sr = i965_cursor_wm_info.fifo_size - entries;
		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		drm_dbg_kms(&dev_priv->drm,
			    "self-refresh watermark: display plane %d "
			    "cursor %d\n", srwm, cursor_sr);

		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		/* Turn off self refresh if both pipes are enabled */
		intel_set_memory_cxsr(dev_priv, false);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		    srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
		   FW_WM(8, CURSORB) |
		   FW_WM(8, PLANEB) |
		   FW_WM(8, PLANEA));
	I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
		   FW_WM(8, PLANEC_OLD));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i9xx_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	const struct intel_watermark_params *wm_info;
	u32 fwater_lo;
	u32 fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct intel_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev_priv))
		wm_info = &i945_wm_info;
	else if (!IS_GEN(dev_priv, 2))
		wm_info = &i915_wm_info;
	else
		wm_info = &i830_a_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev_priv, PLANE_A);
	crtc = intel_get_crtc_for_plane(dev_priv, PLANE_A);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->hw.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int cpp;

		if (IS_GEN(dev_priv, 2))
			cpp = 4;
		else
			cpp = fb->format->cpp[0];

		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		enabled = crtc;
	} else {
		planea_wm = fifo_size - wm_info->guard_size;
		if (planea_wm > (long)wm_info->max_wm)
			planea_wm = wm_info->max_wm;
	}

	if (IS_GEN(dev_priv, 2))
		wm_info = &i830_bc_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev_priv, PLANE_B);
	crtc = intel_get_crtc_for_plane(dev_priv, PLANE_B);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->hw.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int cpp;

		if (IS_GEN(dev_priv, 2))
			cpp = 4;
		else
			cpp = fb->format->cpp[0];

		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else {
		planeb_wm = fifo_size - wm_info->guard_size;
		if (planeb_wm > (long)wm_info->max_wm)
			planeb_wm = wm_info->max_wm;
	}

	drm_dbg_kms(&dev_priv->drm,
		    "FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	if (IS_I915GM(dev_priv) && enabled) {
		struct drm_i915_gem_object *obj;

		obj = intel_fb_obj(enabled->base.primary->state->fb);

		/* self-refresh seems busted with untiled */
		if (!i915_gem_object_is_tiled(obj))
			enabled = NULL;
	}

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	intel_set_memory_cxsr(dev_priv, false);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev_priv) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		const struct drm_display_mode *adjusted_mode =
			&enabled->config->hw.adjusted_mode;
		const struct drm_framebuffer *fb =
			enabled->base.primary->state->fb;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = enabled->config->pipe_src_w;
		int cpp;
		int entries;

		if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
			cpp = 4;
		else
			cpp = fb->format->cpp[0];

		entries = intel_wm_method2(clock, htotal, hdisplay, cpp,
					   sr_latency_ns / 100);
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		drm_dbg_kms(&dev_priv->drm,
			    "self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		    planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i845_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	struct intel_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	u32 fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev_priv);
	if (crtc == NULL)
		return;

	adjusted_mode = &crtc->config->hw.adjusted_mode;
	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
				       &i845_wm_info,
				       dev_priv->display.get_fifo_size(dev_priv, PLANE_A),
				       4, pessimal_latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	drm_dbg_kms(&dev_priv->drm,
		    "Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}
/* latency must be in 0.1us units. */
static unsigned int ilk_wm_method1(unsigned int pixel_rate,
				   unsigned int cpp,
				   unsigned int latency)
{
	unsigned int ret;

	ret = intel_wm_method1(pixel_rate, cpp, latency);
	ret = DIV_ROUND_UP(ret, 64) + 2;

	return ret;
}

/* latency must be in 0.1us units. */
static unsigned int ilk_wm_method2(unsigned int pixel_rate,
				   unsigned int htotal,
				   unsigned int width,
				   unsigned int cpp,
				   unsigned int latency)
{
	unsigned int ret;

	ret = intel_wm_method2(pixel_rate, htotal,
			       width, cpp, latency);
	ret = DIV_ROUND_UP(ret, 64) + 2;

	return ret;
}
static u32 ilk_wm_fbc(u32 pri_val, u32 horiz_pixels, u8 cpp)
{
	/*
	 * Neither of these should be possible since this function shouldn't be
	 * called if the CRTC is off or the plane is invisible. But let's be
	 * extra paranoid to avoid a potential divide-by-zero if we screw up
	 * elsewhere in the driver.
	 */
	if (WARN_ON(!cpp))
		return 0;
	if (WARN_ON(!horiz_pixels))
		return 0;

	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
}
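/*
 * Example with invented numbers: pri_val = 120 cache lines on a
 * 1920-pixel wide, 4 bytes/pixel plane gives
 * DIV_ROUND_UP(120 * 64, 1920 * 4) + 2 = 1 + 2 = 3 FBC watermark lines.
 */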
struct ilk_wm_maximums {
	u16 pri;
	u16 spr;
	u16 cur;
	u16 fbc;
};

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static u32 ilk_compute_pri_wm(const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state,
			      u32 mem_value, bool is_lp)
{
	u32 method1, method2;
	int cpp;

	if (mem_value == 0)
		return U32_MAX;

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	cpp = plane_state->hw.fb->format->cpp[0];

	method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);

	if (!is_lp)
		return method1;

	method2 = ilk_wm_method2(crtc_state->pixel_rate,
				 crtc_state->hw.adjusted_mode.crtc_htotal,
				 drm_rect_width(&plane_state->uapi.dst),
				 cpp, mem_value);

	return min(method1, method2);
}
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static u32 ilk_compute_spr_wm(const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state,
			      u32 mem_value)
{
	u32 method1, method2;
	int cpp;

	if (mem_value == 0)
		return U32_MAX;

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	cpp = plane_state->hw.fb->format->cpp[0];

	method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
	method2 = ilk_wm_method2(crtc_state->pixel_rate,
				 crtc_state->hw.adjusted_mode.crtc_htotal,
				 drm_rect_width(&plane_state->uapi.dst),
				 cpp, mem_value);
	return min(method1, method2);
}

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static u32 ilk_compute_cur_wm(const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state,
			      u32 mem_value)
{
	int cpp;

	if (mem_value == 0)
		return U32_MAX;

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	cpp = plane_state->hw.fb->format->cpp[0];

	return ilk_wm_method2(crtc_state->pixel_rate,
			      crtc_state->hw.adjusted_mode.crtc_htotal,
			      drm_rect_width(&plane_state->uapi.dst),
			      cpp, mem_value);
}

/* Only for WM_LP. */
static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state,
			      u32 pri_val)
{
	int cpp;

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	cpp = plane_state->hw.fb->format->cpp[0];

	return ilk_wm_fbc(pri_val, drm_rect_width(&plane_state->uapi.dst),
			  cpp);
}
static unsigned int
ilk_display_fifo_size(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 8)
		return 3072;
	else if (INTEL_GEN(dev_priv) >= 7)
		return 768;
	else
		return 512;
}

static unsigned int
ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
		     int level, bool is_sprite)
{
	if (INTEL_GEN(dev_priv) >= 8)
		/* BDW primary/sprite plane watermarks */
		return level == 0 ? 255 : 2047;
	else if (INTEL_GEN(dev_priv) >= 7)
		/* IVB/HSW primary/sprite plane watermarks */
		return level == 0 ? 127 : 1023;
	else if (!is_sprite)
		/* ILK/SNB primary plane watermarks */
		return level == 0 ? 127 : 511;
	else
		/* ILK/SNB sprite plane watermarks */
		return level == 0 ? 63 : 255;
}

static unsigned int
ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level)
{
	if (INTEL_GEN(dev_priv) >= 7)
		return level == 0 ? 63 : 255;
	else
		return level == 0 ? 31 : 63;
}

static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 8)
		return 31;
	else
		return 15;
}
/* Calculate the maximum primary/sprite plane watermark */
static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
				     int level,
				     const struct intel_wm_config *config,
				     enum intel_ddb_partitioning ddb_partitioning,
				     bool is_sprite)
{
	unsigned int fifo_size = ilk_display_fifo_size(dev_priv);

	/* if sprites aren't enabled, sprites get nothing */
	if (is_sprite && !config->sprites_enabled)
		return 0;

	/* HSW allows LP1+ watermarks even with multiple pipes */
	if (level == 0 || config->num_pipes_active > 1) {
		fifo_size /= INTEL_NUM_PIPES(dev_priv);

		/*
		 * For some reason the non self refresh
		 * FIFO size is only half of the self
		 * refresh FIFO size on ILK/SNB.
		 */
		if (INTEL_GEN(dev_priv) <= 6)
			fifo_size /= 2;
	}

	if (config->sprites_enabled) {
		/* level 0 is always calculated with 1:1 split */
		if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
			if (is_sprite)
				fifo_size *= 5;
			fifo_size /= 6;
		} else {
			fifo_size /= 2;
		}
	}

	/* clamp to max that the registers can hold */
	return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite));
}

/* Calculate the maximum cursor plane watermark */
static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv,
				      int level,
				      const struct intel_wm_config *config)
{
	/* HSW LP1+ watermarks w/ multiple pipes */
	if (level > 0 && config->num_pipes_active > 1)
		return 64;

	/* otherwise just report max that registers can hold */
	return ilk_cursor_wm_reg_max(dev_priv, level);
}
static void ilk_compute_wm_maximums(const struct drm_i915_private *dev_priv,
				    int level,
				    const struct intel_wm_config *config,
				    enum intel_ddb_partitioning ddb_partitioning,
				    struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, false);
	max->spr = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, true);
	max->cur = ilk_cursor_wm_max(dev_priv, level, config);
	max->fbc = ilk_fbc_wm_reg_max(dev_priv);
}

static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
					int level,
					struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_reg_max(dev_priv, level, false);
	max->spr = ilk_plane_wm_reg_max(dev_priv, level, true);
	max->cur = ilk_cursor_wm_reg_max(dev_priv, level);
	max->fbc = ilk_fbc_wm_reg_max(dev_priv);
}
static bool ilk_validate_wm_level(int level,
				  const struct ilk_wm_maximums *max,
				  struct intel_wm_level *result)
{
	bool ret;

	/* already determined to be invalid? */
	if (!result->enable)
		return false;

	result->enable = result->pri_val <= max->pri &&
			 result->spr_val <= max->spr &&
			 result->cur_val <= max->cur;

	ret = result->enable;

	/*
	 * HACK until we can pre-compute everything,
	 * and thus fail gracefully if LP0 watermarks
	 * are exceeded...
	 */
	if (level == 0 && !result->enable) {
		if (result->pri_val > max->pri)
			DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
				      level, result->pri_val, max->pri);
		if (result->spr_val > max->spr)
			DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
				      level, result->spr_val, max->spr);
		if (result->cur_val > max->cur)
			DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
				      level, result->cur_val, max->cur);

		result->pri_val = min_t(u32, result->pri_val, max->pri);
		result->spr_val = min_t(u32, result->spr_val, max->spr);
		result->cur_val = min_t(u32, result->cur_val, max->cur);
		result->enable = true;
	}

	return ret;
}
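/*
 * Note on the LP0 hack above, with invented numbers: if max->pri is 127
 * and the computed pri_val is 150, the LP0 level is clamped to 127 and
 * force-enabled so the hardware is still programmed with something
 * usable, but the function still returns false (ret was captured before
 * the clamp) so callers can tell the configuration was out of range.
 */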
static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
				 const struct intel_crtc *intel_crtc,
				 int level,
				 struct intel_crtc_state *crtc_state,
				 const struct intel_plane_state *pristate,
				 const struct intel_plane_state *sprstate,
				 const struct intel_plane_state *curstate,
				 struct intel_wm_level *result)
{
	u16 pri_latency = dev_priv->wm.pri_latency[level];
	u16 spr_latency = dev_priv->wm.spr_latency[level];
	u16 cur_latency = dev_priv->wm.cur_latency[level];

	/* WM1+ latency values stored in 0.5us units */
	if (level > 0) {
		pri_latency *= 5;
		spr_latency *= 5;
		cur_latency *= 5;
	}

	if (pristate) {
		result->pri_val = ilk_compute_pri_wm(crtc_state, pristate,
						     pri_latency, level);
		result->fbc_val = ilk_compute_fbc_wm(crtc_state, pristate, result->pri_val);
	}

	if (sprstate)
		result->spr_val = ilk_compute_spr_wm(crtc_state, sprstate, spr_latency);

	if (curstate)
		result->cur_val = ilk_compute_cur_wm(crtc_state, curstate, cur_latency);

	result->enable = true;
}
static u32
hsw_compute_linetime_wm(const struct intel_crtc_state *crtc_state)
{
	const struct intel_atomic_state *intel_state =
		to_intel_atomic_state(crtc_state->uapi.state);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	u32 linetime, ips_linetime;

	if (!crtc_state->hw.active)
		return 0;
	if (WARN_ON(adjusted_mode->crtc_clock == 0))
		return 0;
	if (WARN_ON(intel_state->cdclk.logical.cdclk == 0))
		return 0;

	/*
	 * The watermarks are computed based on how long it takes to
	 * fill a single row at the given clock rate, multiplied by 8.
	 */
	linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
				     adjusted_mode->crtc_clock);
	ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
					 intel_state->cdclk.logical.cdclk);

	return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
	       PIPE_WM_LINETIME_TIME(linetime);
}
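/*
 * Worked example for hsw_compute_linetime_wm() (illustrative numbers,
 * not from bspec): a 1920x1080@60 mode with crtc_htotal = 2200 and
 * crtc_clock = 148500 kHz gives
 *   linetime = DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500) = 119
 * in 1/8 us units, i.e. roughly 14.9 us to fill one line.
 */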
static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
				  u16 wm[8])
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (INTEL_GEN(dev_priv) >= 9) {
		u32 val;
		int ret, i;
		int level, max_level = ilk_wm_max_level(dev_priv);

		/* read the first set of memory latencies[0:3] */
		val = 0; /* data0 to be programmed to 0 for first set */
		ret = sandybridge_pcode_read(dev_priv,
					     GEN9_PCODE_READ_MEM_LATENCY,
					     &val, NULL);

		if (ret) {
			drm_err(&dev_priv->drm,
				"SKL Mailbox read error = %d\n", ret);
			return;
		}

		wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
			GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
			GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
			GEN9_MEM_LATENCY_LEVEL_MASK;

		/* read the second set of memory latencies[4:7] */
		val = 1; /* data0 to be programmed to 1 for second set */
		ret = sandybridge_pcode_read(dev_priv,
					     GEN9_PCODE_READ_MEM_LATENCY,
					     &val, NULL);
		if (ret) {
			drm_err(&dev_priv->drm,
				"SKL Mailbox read error = %d\n", ret);
			return;
		}

		wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
			GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
			GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
			GEN9_MEM_LATENCY_LEVEL_MASK;

		/*
		 * If a level n (n > 1) has a 0us latency, all levels m (m >= n)
		 * need to be disabled. We make sure to sanitize the values out
		 * of the punit to satisfy this requirement.
		 */
		for (level = 1; level <= max_level; level++) {
			if (wm[level] == 0) {
				for (i = level + 1; i <= max_level; i++)
					wm[i] = 0;
				break;
			}
		}

		/*
		 * WaWmMemoryReadLatency:skl+,glk
		 *
		 * punit doesn't take into account the read latency so we need
		 * to add 2us to the various latency levels we retrieve from the
		 * punit when the level 0 response data is 0us.
		 */
		if (wm[0] == 0) {
			wm[0] += 2;
			for (level = 1; level <= max_level; level++) {
				if (wm[level] == 0)
					break;
				wm[level] += 2;
			}
		}

		/*
		 * WA Level-0 adjustment for 16GB DIMMs: SKL+
		 * If we could not get dimm info enable this WA to prevent from
		 * any underrun. If not able to get Dimm info assume 16GB dimm
		 * to avoid any underrun.
		 */
		if (dev_priv->dram_info.is_16gb_dimm)
			wm[0] += 1;

	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		u64 sskpd = intel_uncore_read64(uncore, MCH_SSKPD);

		wm[0] = (sskpd >> 56) & 0xFF;
		if (wm[0] == 0)
			wm[0] = sskpd & 0xF;
		wm[1] = (sskpd >> 4) & 0xFF;
		wm[2] = (sskpd >> 12) & 0xFF;
		wm[3] = (sskpd >> 20) & 0x1FF;
		wm[4] = (sskpd >> 32) & 0x1FF;
	} else if (INTEL_GEN(dev_priv) >= 6) {
		u32 sskpd = intel_uncore_read(uncore, MCH_SSKPD);

		wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
		wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
		wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
		wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
	} else if (INTEL_GEN(dev_priv) >= 5) {
		u32 mltr = intel_uncore_read(uncore, MLTR_ILK);

		/* ILK primary LP0 latency is 700 ns */
		wm[0] = 7;
		wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
		wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
	} else {
		MISSING_CASE(INTEL_DEVID(dev_priv));
	}
}
static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
				       u16 wm[5])
{
	/* ILK sprite LP0 latency is 1300 ns */
	if (IS_GEN(dev_priv, 5))
		wm[0] = 13;
}

static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
				       u16 wm[5])
{
	/* ILK cursor LP0 latency is 1300 ns */
	if (IS_GEN(dev_priv, 5))
		wm[0] = 13;
}

int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
{
	/* how many WM levels are we expecting */
	if (INTEL_GEN(dev_priv) >= 9)
		return 7;
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		return 4;
	else if (INTEL_GEN(dev_priv) >= 6)
		return 3;
	else
		return 2;
}
static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
				   const char *name,
				   const u16 wm[8])
{
	int level, max_level = ilk_wm_max_level(dev_priv);

	for (level = 0; level <= max_level; level++) {
		unsigned int latency = wm[level];

		if (latency == 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "%s WM%d latency not provided\n",
				    name, level);
			continue;
		}

		/*
		 * - latencies are in us on gen9.
		 * - before then, WM1+ latency values are in 0.5us units
		 */
		if (INTEL_GEN(dev_priv) >= 9)
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		drm_dbg_kms(&dev_priv->drm,
			    "%s WM%d latency %u (%u.%u usec)\n", name, level,
			    wm[level], latency / 10, latency % 10);
	}
}
static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
				    u16 wm[5], u16 min)
{
	int level, max_level = ilk_wm_max_level(dev_priv);

	if (wm[0] >= min)
		return false;

	wm[0] = max(wm[0], min);
	for (level = 1; level <= max_level; level++)
		wm[level] = max_t(u16, wm[level], DIV_ROUND_UP(min, 5));

	return true;
}
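/*
 * Example for the helper above, as used by snb_wm_latency_quirk() with
 * min = 12: wm[0] (0.1us units) is raised to at least 12, i.e. 1.2us,
 * while WM1+ (0.5us units) are raised to at least
 * DIV_ROUND_UP(12, 5) = 3, i.e. 1.5us.
 */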
static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
{
	bool changed;

	/*
	 * The BIOS provided WM memory latency values are often
	 * inadequate for high resolution displays. Adjust them.
	 */
	changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
		ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
		ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);

	if (!changed)
		return;

	drm_dbg_kms(&dev_priv->drm,
		    "WM latency values increased to avoid potential underruns\n");
	intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
}
static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
{
	/*
	 * On some SNB machines (Thinkpad X220 Tablet at least)
	 * LP3 usage can cause vblank interrupts to be lost.
	 * The DEIIR bit will go high but it looks like the CPU
	 * never gets interrupted.
	 *
	 * It's not clear whether other interrupt sources could
	 * be affected or if this is somehow limited to vblank
	 * interrupts only. To play it safe we disable LP3
	 * watermarks entirely.
	 */
	if (dev_priv->wm.pri_latency[3] == 0 &&
	    dev_priv->wm.spr_latency[3] == 0 &&
	    dev_priv->wm.cur_latency[3] == 0)
		return;

	dev_priv->wm.pri_latency[3] = 0;
	dev_priv->wm.spr_latency[3] = 0;
	dev_priv->wm.cur_latency[3] = 0;

	drm_dbg_kms(&dev_priv->drm,
		    "LP3 watermarks disabled due to potential for lost interrupts\n");
	intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
}
static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);

	memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
	       sizeof(dev_priv->wm.pri_latency));
	memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
	       sizeof(dev_priv->wm.pri_latency));

	intel_fixup_spr_wm_latency(dev_priv, dev_priv->wm.spr_latency);
	intel_fixup_cur_wm_latency(dev_priv, dev_priv->wm.cur_latency);

	intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);

	if (IS_GEN(dev_priv, 6)) {
		snb_wm_latency_quirk(dev_priv);
		snb_wm_lp3_irq_quirk(dev_priv);
	}
}

static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	intel_read_wm_latency(dev_priv, dev_priv->wm.skl_latency);
	intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency);
}
static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
				 struct intel_pipe_wm *pipe_wm)
{
	/* LP0 watermark maximums depend on this pipe alone */
	const struct intel_wm_config config = {
		.num_pipes_active = 1,
		.sprites_enabled = pipe_wm->sprites_enabled,
		.sprites_scaled = pipe_wm->sprites_scaled,
	};
	struct ilk_wm_maximums max;

	/* LP0 watermarks always use 1/2 DDB partitioning */
	ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max);

	/* At least LP0 must be valid */
	if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
		drm_dbg_kms(&dev_priv->drm, "LP0 watermark invalid\n");
		return false;
	}

	return true;
}
/* Compute new watermarks for the pipe */
static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_pipe_wm *pipe_wm;
	struct intel_plane *plane;
	const struct intel_plane_state *plane_state;
	const struct intel_plane_state *pristate = NULL;
	const struct intel_plane_state *sprstate = NULL;
	const struct intel_plane_state *curstate = NULL;
	int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
	struct ilk_wm_maximums max;

	pipe_wm = &crtc_state->wm.ilk.optimal;

	intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
		if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
			pristate = plane_state;
		else if (plane->base.type == DRM_PLANE_TYPE_OVERLAY)
			sprstate = plane_state;
		else if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
			curstate = plane_state;
	}

	pipe_wm->pipe_enabled = crtc_state->hw.active;
	if (sprstate) {
		pipe_wm->sprites_enabled = sprstate->uapi.visible;
		pipe_wm->sprites_scaled = sprstate->uapi.visible &&
			(drm_rect_width(&sprstate->uapi.dst) != drm_rect_width(&sprstate->uapi.src) >> 16 ||
			 drm_rect_height(&sprstate->uapi.dst) != drm_rect_height(&sprstate->uapi.src) >> 16);
	}

	usable_level = max_level;

	/* ILK/SNB: LP2+ watermarks only w/o sprites */
	if (INTEL_GEN(dev_priv) <= 6 && pipe_wm->sprites_enabled)
		usable_level = 1;

	/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
	if (pipe_wm->sprites_scaled)
		usable_level = 0;

	memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
	ilk_compute_wm_level(dev_priv, intel_crtc, 0, crtc_state,
			     pristate, sprstate, curstate, &pipe_wm->wm[0]);

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		pipe_wm->linetime = hsw_compute_linetime_wm(crtc_state);

	if (!ilk_validate_pipe_wm(dev_priv, pipe_wm))
		return -EINVAL;

	ilk_compute_wm_reg_maximums(dev_priv, 1, &max);

	for (level = 1; level <= usable_level; level++) {
		struct intel_wm_level *wm = &pipe_wm->wm[level];

		ilk_compute_wm_level(dev_priv, intel_crtc, level, crtc_state,
				     pristate, sprstate, curstate, wm);

		/*
		 * Disable any watermark level that exceeds the
		 * register maximums since such watermarks are
		 * always invalid.
		 */
		if (!ilk_validate_wm_level(level, &max, wm)) {
			memset(wm, 0, sizeof(*wm));
			break;
		}
	}

	return 0;
}
/*
 * Build a set of 'intermediate' watermark values that satisfy both the old
 * state and the new state. These can be programmed to the hardware
 * immediately.
 */
static int ilk_compute_intermediate_wm(struct intel_crtc_state *newstate)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(newstate->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(newstate->uapi.state);
	const struct intel_crtc_state *oldstate =
		intel_atomic_get_old_crtc_state(intel_state, intel_crtc);
	const struct intel_pipe_wm *b = &oldstate->wm.ilk.optimal;
	int level, max_level = ilk_wm_max_level(dev_priv);

	/*
	 * Start with the final, target watermarks, then combine with the
	 * currently active watermarks to get values that are safe both before
	 * and after the vblank.
	 */
	*a = newstate->wm.ilk.optimal;
	if (!newstate->hw.active || drm_atomic_crtc_needs_modeset(&newstate->uapi) ||
	    intel_state->skip_intermediate_wm)
		return 0;

	a->pipe_enabled |= b->pipe_enabled;
	a->sprites_enabled |= b->sprites_enabled;
	a->sprites_scaled |= b->sprites_scaled;

	for (level = 0; level <= max_level; level++) {
		struct intel_wm_level *a_wm = &a->wm[level];
		const struct intel_wm_level *b_wm = &b->wm[level];

		a_wm->enable &= b_wm->enable;
		a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val);
		a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val);
		a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val);
		a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val);
	}

	/*
	 * We need to make sure that these merged watermark values are
	 * actually a valid configuration themselves. If they're not,
	 * there's no safe way to transition from the old state to
	 * the new state, so we need to fail the atomic transaction.
	 */
	if (!ilk_validate_pipe_wm(dev_priv, a))
		return -EINVAL;

	/*
	 * If our intermediate WM are identical to the final WM, then we can
	 * omit the post-vblank programming; only update if it's different.
	 */
	if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) != 0)
		newstate->wm.need_postvbl_update = true;

	return 0;
}
/*
 * Merge the watermarks from all active pipes for a specific level.
 */
static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
			       int level,
			       struct intel_wm_level *ret_wm)
{
	const struct intel_crtc *intel_crtc;

	ret_wm->enable = true;

	for_each_intel_crtc(&dev_priv->drm, intel_crtc) {
		const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk;
		const struct intel_wm_level *wm = &active->wm[level];

		if (!active->pipe_enabled)
			continue;

		/*
		 * The watermark values may have been used in the past,
		 * so we must maintain them in the registers for some
		 * time even if the level is now disabled.
		 */
		if (!wm->enable)
			ret_wm->enable = false;

		ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
		ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
		ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
		ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
	}
}
/*
 * Merge all low power watermarks for all active pipes.
 */
static void ilk_wm_merge(struct drm_i915_private *dev_priv,
			 const struct intel_wm_config *config,
			 const struct ilk_wm_maximums *max,
			 struct intel_pipe_wm *merged)
{
	int level, max_level = ilk_wm_max_level(dev_priv);
	int last_enabled_level = max_level;

	/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
	if ((INTEL_GEN(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) &&
	    config->num_pipes_active > 1)
		last_enabled_level = 0;

	/* ILK: FBC WM must be disabled always */
	merged->fbc_wm_enabled = INTEL_GEN(dev_priv) >= 6;

	/* merge each WM1+ level */
	for (level = 1; level <= max_level; level++) {
		struct intel_wm_level *wm = &merged->wm[level];

		ilk_merge_wm_level(dev_priv, level, wm);

		if (level > last_enabled_level)
			wm->enable = false;
		else if (!ilk_validate_wm_level(level, max, wm))
			/* make sure all following levels get disabled */
			last_enabled_level = level - 1;

		/*
		 * The spec says it is preferred to disable
		 * FBC WMs instead of disabling a WM level.
		 */
		if (wm->fbc_val > max->fbc) {
			if (wm->enable)
				merged->fbc_wm_enabled = false;
			wm->fbc_val = 0;
		}
	}

	/* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
	/*
	 * FIXME this is racy. FBC might get enabled later.
	 * What we should check here is whether FBC can be
	 * enabled sometime later.
	 */
	if (IS_GEN(dev_priv, 5) && !merged->fbc_wm_enabled &&
	    intel_fbc_is_active(dev_priv)) {
		for (level = 2; level <= max_level; level++) {
			struct intel_wm_level *wm = &merged->wm[level];

			wm->enable = false;
		}
	}
}
static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
{
	/* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
	return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
}
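/*
 * Example of the mapping above: when pipe_wm->wm[4].enable is set,
 * LP1/LP2/LP3 map to levels 1/3/4; otherwise they map to levels 1/2/3,
 * matching the two alternatives named in the comment.
 */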
/* The value we need to program into the WM_LPx latency field */
static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv,
				      int level)
{
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		return 2 * level;
	else
		return dev_priv->wm.pri_latency[level];
}
static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
				   const struct intel_pipe_wm *merged,
				   enum intel_ddb_partitioning partitioning,
				   struct ilk_wm_values *results)
{
	struct intel_crtc *intel_crtc;
	int level, wm_lp;

	results->enable_fbc_wm = merged->fbc_wm_enabled;
	results->partitioning = partitioning;

	/* LP1+ register values */
	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
		const struct intel_wm_level *r;

		level = ilk_wm_lp_to_level(wm_lp, merged);

		r = &merged->wm[level];

		/*
		 * Maintain the watermark values even if the level is
		 * disabled. Doing otherwise could cause underruns.
		 */
		results->wm_lp[wm_lp - 1] =
			(ilk_wm_lp_latency(dev_priv, level) << WM1_LP_LATENCY_SHIFT) |
			(r->pri_val << WM1_LP_SR_SHIFT) |
			r->cur_val;

		if (r->enable)
			results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;

		if (INTEL_GEN(dev_priv) >= 8)
			results->wm_lp[wm_lp - 1] |=
				r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
		else
			results->wm_lp[wm_lp - 1] |=
				r->fbc_val << WM1_LP_FBC_SHIFT;

		/*
		 * Always set WM1S_LP_EN when spr_val != 0, even if the
		 * level is disabled. Doing otherwise could cause underruns.
		 */
		if (INTEL_GEN(dev_priv) <= 6 && r->spr_val) {
			WARN_ON(wm_lp != 1);
			results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
		} else
			results->wm_lp_spr[wm_lp - 1] = r->spr_val;
	}

	/* LP0 register values */
	for_each_intel_crtc(&dev_priv->drm, intel_crtc) {
		enum pipe pipe = intel_crtc->pipe;
		const struct intel_wm_level *r =
			&intel_crtc->wm.active.ilk.wm[0];

		if (WARN_ON(!r->enable))
			continue;

		results->wm_linetime[pipe] = intel_crtc->wm.active.ilk.linetime;

		results->wm_pipe[pipe] =
			(r->pri_val << WM0_PIPE_PLANE_SHIFT) |
			(r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
			r->cur_val;
	}
}
/* Find the result with the highest level enabled. Check for enable_fbc_wm in
 * case both are at the same level. Prefer r1 in case they're the same. */
static struct intel_pipe_wm *
ilk_find_best_result(struct drm_i915_private *dev_priv,
		     struct intel_pipe_wm *r1,
		     struct intel_pipe_wm *r2)
{
	int level, max_level = ilk_wm_max_level(dev_priv);
	int level1 = 0, level2 = 0;

	for (level = 1; level <= max_level; level++) {
		if (r1->wm[level].enable)
			level1 = level;
		if (r2->wm[level].enable)
			level2 = level;
	}

	if (level1 == level2) {
		if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
			return r2;
		else
			return r1;
	} else if (level1 > level2) {
		return r1;
	} else {
		return r2;
	}
}
/* dirty bits used to track which watermarks need changes */
#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
#define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
#define WM_DIRTY_FBC (1 << 24)
#define WM_DIRTY_DDB (1 << 25)
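/*
 * Resulting bit layout, derived from the macros above: pipe bits sit at
 * 0..2, linetime bits at 8..10, WM_DIRTY_LP(1..3) at bits 16..18, FBC
 * at bit 24 and DDB at bit 25.
 */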
static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
					 const struct ilk_wm_values *old,
					 const struct ilk_wm_values *new)
{
	unsigned int dirty = 0;
	enum pipe pipe;
	int wm_lp;

	for_each_pipe(dev_priv, pipe) {
		if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
			dirty |= WM_DIRTY_LINETIME(pipe);
			/* Must disable LP1+ watermarks too */
			dirty |= WM_DIRTY_LP_ALL;
		}

		if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
			dirty |= WM_DIRTY_PIPE(pipe);
			/* Must disable LP1+ watermarks too */
			dirty |= WM_DIRTY_LP_ALL;
		}
	}

	if (old->enable_fbc_wm != new->enable_fbc_wm) {
		dirty |= WM_DIRTY_FBC;
		/* Must disable LP1+ watermarks too */
		dirty |= WM_DIRTY_LP_ALL;
	}

	if (old->partitioning != new->partitioning) {
		dirty |= WM_DIRTY_DDB;
		/* Must disable LP1+ watermarks too */
		dirty |= WM_DIRTY_LP_ALL;
	}

	/* LP1+ watermarks already deemed dirty, no need to continue */
	if (dirty & WM_DIRTY_LP_ALL)
		return dirty;

	/* Find the lowest numbered LP1+ watermark in need of an update... */
	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
		if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
		    old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
			break;
	}

	/* ...and mark it and all higher numbered LP1+ watermarks as dirty */
	for (; wm_lp <= 3; wm_lp++)
		dirty |= WM_DIRTY_LP(wm_lp);

	return dirty;
}
static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
			       unsigned int dirty)
{
	struct ilk_wm_values *previous = &dev_priv->wm.hw;
	bool changed = false;

	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
		previous->wm_lp[2] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
		changed = true;
	}
	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
		previous->wm_lp[1] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
		changed = true;
	}
	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
		previous->wm_lp[0] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
		changed = true;
	}

	/*
	 * Don't touch WM1S_LP_EN here.
	 * Doing so could cause underruns.
	 */

	return changed;
}
/*
 * The spec says we shouldn't write when we don't need, because every write
 * causes WMs to be re-evaluated, expending some power.
 */
static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
				struct ilk_wm_values *results)
{
	struct ilk_wm_values *previous = &dev_priv->wm.hw;
	unsigned int dirty;
	u32 val;

	dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
	if (!dirty)
		return;

	_ilk_disable_lp_wm(dev_priv, dirty);

	if (dirty & WM_DIRTY_PIPE(PIPE_A))
		I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
	if (dirty & WM_DIRTY_PIPE(PIPE_B))
		I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
	if (dirty & WM_DIRTY_PIPE(PIPE_C))
		I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);

	if (dirty & WM_DIRTY_LINETIME(PIPE_A))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
	if (dirty & WM_DIRTY_LINETIME(PIPE_B))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
	if (dirty & WM_DIRTY_LINETIME(PIPE_C))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);

	if (dirty & WM_DIRTY_DDB) {
		if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
			val = I915_READ(WM_MISC);
			if (results->partitioning == INTEL_DDB_PART_1_2)
				val &= ~WM_MISC_DATA_PARTITION_5_6;
			else
				val |= WM_MISC_DATA_PARTITION_5_6;
			I915_WRITE(WM_MISC, val);
		} else {
			val = I915_READ(DISP_ARB_CTL2);
			if (results->partitioning == INTEL_DDB_PART_1_2)
				val &= ~DISP_DATA_PARTITION_5_6;
			else
				val |= DISP_DATA_PARTITION_5_6;
			I915_WRITE(DISP_ARB_CTL2, val);
		}
	}

	if (dirty & WM_DIRTY_FBC) {
		val = I915_READ(DISP_ARB_CTL);
		if (results->enable_fbc_wm)
			val &= ~DISP_FBC_WM_DIS;
		else
			val |= DISP_FBC_WM_DIS;
		I915_WRITE(DISP_ARB_CTL, val);
	}

	if (dirty & WM_DIRTY_LP(1) &&
	    previous->wm_lp_spr[0] != results->wm_lp_spr[0])
		I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);

	if (INTEL_GEN(dev_priv) >= 7) {
		if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
			I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
		if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
			I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
	}

	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
		I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
		I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
		I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);

	dev_priv->wm.hw = *results;
}

bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv)
{
	return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
}
static u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv)
{
	u8 enabled_slices;

	/* Slice 1 will always be enabled */
	enabled_slices = 1;

	/* Gen prior to GEN11 have only one DBuf slice */
	if (INTEL_GEN(dev_priv) < 11)
		return enabled_slices;

	/*
	 * FIXME: for now we'll only ever use 1 slice; pretend that we have
	 * only that 1 slice enabled until we have a proper way for on-demand
	 * toggling of the second slice.
	 */
	if (0 && I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)
		enabled_slices++;

	return enabled_slices;
}

/*
 * FIXME: We still don't have the proper code to detect whether we need to
 * apply the WA, so assume we'll always need it in order to avoid underruns.
 */
static bool skl_needs_memory_bw_wa(struct drm_i915_private *dev_priv)
{
	return IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv);
}
static bool
intel_has_sagv(struct drm_i915_private *dev_priv)
{
	/* HACK! */
	if (IS_GEN(dev_priv, 12))
		return false;

	return (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) &&
		dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED;
}

static void
skl_setup_sagv_block_time(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 12) {
		u32 val = 0;
		int ret;

		ret = sandybridge_pcode_read(dev_priv,
					     GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
					     &val, NULL);
		if (!ret) {
			dev_priv->sagv_block_time_us = val;
			return;
		}

		drm_dbg(&dev_priv->drm, "Couldn't read SAGV block time!\n");
	} else if (IS_GEN(dev_priv, 11)) {
		dev_priv->sagv_block_time_us = 10;
		return;
	} else if (IS_GEN(dev_priv, 10)) {
		dev_priv->sagv_block_time_us = 20;
		return;
	} else if (IS_GEN(dev_priv, 9)) {
		dev_priv->sagv_block_time_us = 30;
		return;
	} else {
		MISSING_CASE(INTEL_GEN(dev_priv));
	}

	/* Default to an unusable block time */
	dev_priv->sagv_block_time_us = -1;
}
/*
 * SAGV dynamically adjusts the system agent voltage and clock frequencies
 * depending on power and performance requirements. The display engine access
 * to system memory is blocked during the adjustment time. Because of the
 * blocking time, having this enabled can cause full system hangs and/or pipe
 * underruns if we don't meet all of the following requirements:
 *
 *  - <= 1 pipe enabled
 *  - All planes can enable watermarks for latencies >= SAGV engine block time
 *  - We're not using an interlaced display configuration
 */
int
intel_enable_sagv(struct drm_i915_private *dev_priv)
{
	int ret;

	if (!intel_has_sagv(dev_priv))
		return 0;

	if (dev_priv->sagv_status == I915_SAGV_ENABLED)
		return 0;

	drm_dbg_kms(&dev_priv->drm, "Enabling SAGV\n");
	ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
				      GEN9_SAGV_ENABLE);

	/* We don't need to wait for SAGV when enabling */

	/*
	 * Some skl systems, pre-release machines in particular,
	 * don't actually have SAGV.
	 */
	if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
		drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n");
		dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
		return 0;
	} else if (ret < 0) {
		drm_err(&dev_priv->drm, "Failed to enable SAGV\n");
		return ret;
	}

	dev_priv->sagv_status = I915_SAGV_ENABLED;
	return 0;
}

int
intel_disable_sagv(struct drm_i915_private *dev_priv)
{
	int ret;

	if (!intel_has_sagv(dev_priv))
		return 0;

	if (dev_priv->sagv_status == I915_SAGV_DISABLED)
		return 0;

	drm_dbg_kms(&dev_priv->drm, "Disabling SAGV\n");
	/* bspec says to keep retrying for at least 1 ms */
	ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
				GEN9_SAGV_DISABLE,
				GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
				1);
	/*
	 * Some skl systems, pre-release machines in particular,
	 * don't actually have SAGV.
	 */
	if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
		drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n");
		dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
		return 0;
	} else if (ret < 0) {
		drm_err(&dev_priv->drm, "Failed to disable SAGV (%d)\n", ret);
		return ret;
	}

	dev_priv->sagv_status = I915_SAGV_DISABLED;
	return 0;
}
bool intel_can_enable_sagv(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc;
	struct intel_plane *plane;
	struct intel_crtc_state *crtc_state;
	enum pipe pipe;
	int level, latency;

	if (!intel_has_sagv(dev_priv))
		return false;

	/*
	 * If there are no active CRTCs, no additional checks need be performed
	 */
	if (hweight8(state->active_pipes) == 0)
		return true;

	/*
	 * SKL+ workaround: bspec recommends we disable SAGV when we have
	 * more than one pipe enabled
	 */
	if (hweight8(state->active_pipes) > 1)
		return false;

	/* Since we're now guaranteed to only have one active CRTC... */
	pipe = ffs(state->active_pipes) - 1;
	crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	crtc_state = to_intel_crtc_state(crtc->base.state);

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		return false;

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct skl_plane_wm *wm =
			&crtc_state->wm.skl.optimal.planes[plane->id];

		/* Skip this plane if it's not enabled */
		if (!wm->wm[0].plane_en)
			continue;

		/* Find the highest enabled wm level for this plane */
		for (level = ilk_wm_max_level(dev_priv);
		     !wm->wm[level].plane_en; --level)
		     { }

		latency = dev_priv->wm.skl_latency[level];

		if (skl_needs_memory_bw_wa(dev_priv) &&
		    plane->base.state->fb->modifier ==
		    I915_FORMAT_MOD_X_TILED)
			latency += 15;

		/*
		 * If any of the planes on this pipe don't enable wm levels that
		 * incur memory latencies higher than sagv_block_time_us we
		 * can't enable SAGV.
		 */
		if (latency < dev_priv->sagv_block_time_us)
			return false;
	}

	return true;
}
static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
			      const struct intel_crtc_state *crtc_state,
			      const u64 total_data_rate,
			      const int num_active,
			      struct skl_ddb_allocation *ddb)
{
	const struct drm_display_mode *adjusted_mode;
	u64 total_data_bw;
	u16 ddb_size = INTEL_INFO(dev_priv)->ddb_size;

	WARN_ON(ddb_size == 0);

	if (INTEL_GEN(dev_priv) < 11)
		return ddb_size - 4; /* 4 blocks for bypass path allocation */

	adjusted_mode = &crtc_state->hw.adjusted_mode;
	total_data_bw = total_data_rate * drm_mode_vrefresh(adjusted_mode);

	/*
	 * 12GB/s is maximum BW supported by single DBuf slice.
	 *
	 * FIXME dbuf slice code is broken:
	 * - must wait for planes to stop using the slice before powering it off
	 * - plane straddling both slices is illegal in multi-pipe scenarios
	 * - should validate we stay within the hw bandwidth limits
	 */
	if (0 && (num_active > 1 || total_data_bw >= GBps(12))) {
		ddb->enabled_slices = 2;
	} else {
		ddb->enabled_slices = 1;
		ddb_size /= 2;
	}

	return ddb_size;
}
static void
skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
				   const struct intel_crtc_state *crtc_state,
				   const u64 total_data_rate,
				   struct skl_ddb_allocation *ddb,
				   struct skl_ddb_entry *alloc, /* out */
				   int *num_active /* out */)
{
	struct drm_atomic_state *state = crtc_state->uapi.state;
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_crtc *for_crtc = crtc_state->uapi.crtc;
	const struct intel_crtc *crtc;
	u32 pipe_width = 0, total_width = 0, width_before_pipe = 0;
	enum pipe for_pipe = to_intel_crtc(for_crtc)->pipe;
	u16 ddb_size;
	u32 i;

	if (WARN_ON(!state) || !crtc_state->hw.active) {
		alloc->start = 0;
		alloc->end = 0;
		*num_active = hweight8(dev_priv->active_pipes);
		return;
	}

	if (intel_state->active_pipe_changes)
		*num_active = hweight8(intel_state->active_pipes);
	else
		*num_active = hweight8(dev_priv->active_pipes);

	ddb_size = intel_get_ddb_size(dev_priv, crtc_state, total_data_rate,
				      *num_active, ddb);

	/*
	 * If the state doesn't change the active CRTC's or there is no
	 * modeset request, then there's no need to recalculate;
	 * the existing pipe allocation limits should remain unchanged.
	 * Note that we're safe from racing commits since any racing commit
	 * that changes the active CRTC list or does a modeset would need to
	 * grab _all_ crtc locks, including the one we currently hold.
	 */
	if (!intel_state->active_pipe_changes && !intel_state->modeset) {
		/*
		 * alloc may be cleared by clear_intel_crtc_state,
		 * copy from old state to be sure
		 */
		*alloc = to_intel_crtc_state(for_crtc->state)->wm.skl.ddb;
		return;
	}

	/*
	 * Watermark/ddb requirement highly depends upon width of the
	 * framebuffer, so instead of allocating DDB equally among pipes
	 * distribute DDB based on resolution/width of the display.
	 */
	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
		const struct drm_display_mode *adjusted_mode =
			&crtc_state->hw.adjusted_mode;
		enum pipe pipe = crtc->pipe;
		int hdisplay, vdisplay;

		if (!crtc_state->hw.enable)
			continue;

		drm_mode_get_hv_timing(adjusted_mode, &hdisplay, &vdisplay);
		total_width += hdisplay;

		if (pipe < for_pipe)
			width_before_pipe += hdisplay;
		else if (pipe == for_pipe)
			pipe_width = hdisplay;
	}

	alloc->start = ddb_size * width_before_pipe / total_width;
	alloc->end = ddb_size * (width_before_pipe + pipe_width) / total_width;
}
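/*
 * Worked example (illustrative): with pipe A 1920 pixels wide and pipe B
 * 3840 pixels wide, total_width is 5760. Pipe A gets [0, ddb_size/3) and
 * pipe B gets [ddb_size/3, ddb_size), i.e. each pipe's DDB share is
 * proportional to its hdisplay.
 */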
static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
				 int width, const struct drm_format_info *format,
				 u64 modifier, unsigned int rotation,
				 u32 plane_pixel_rate, struct skl_wm_params *wp,
				 int color_plane);
static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
				 int level,
				 const struct skl_wm_params *wp,
				 const struct skl_wm_level *result_prev,
				 struct skl_wm_level *result /* out */);
static unsigned int
skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
		      int num_active)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	int level, max_level = ilk_wm_max_level(dev_priv);
	struct skl_wm_level wm = {};
	int ret, min_ddb_alloc = 0;
	struct skl_wm_params wp;

	ret = skl_compute_wm_params(crtc_state, 256,
				    drm_format_info(DRM_FORMAT_ARGB8888),
				    DRM_FORMAT_MOD_LINEAR,
				    DRM_MODE_ROTATE_0,
				    crtc_state->pixel_rate, &wp, 0);
	WARN_ON(ret);

	for (level = 0; level <= max_level; level++) {
		skl_compute_plane_wm(crtc_state, level, &wp, &wm, &wm);
		if (wm.min_ddb_alloc == U16_MAX)
			break;

		min_ddb_alloc = wm.min_ddb_alloc;
	}

	return max(num_active == 1 ? 32 : 8, min_ddb_alloc);
}
static void skl_ddb_entry_init_from_hw(struct drm_i915_private *dev_priv,
				       struct skl_ddb_entry *entry, u32 reg)
{
	entry->start = reg & DDB_ENTRY_MASK;
	entry->end = (reg >> DDB_ENTRY_END_SHIFT) & DDB_ENTRY_MASK;

	if (entry->end)
		entry->end += 1;
}
static void
skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
			   const enum pipe pipe,
			   const enum plane_id plane_id,
			   struct skl_ddb_entry *ddb_y,
			   struct skl_ddb_entry *ddb_uv)
{
	u32 val, val2;
	u32 fourcc = 0;

	/* Cursor doesn't support NV12/planar, so no extra calculation needed */
	if (plane_id == PLANE_CURSOR) {
		val = I915_READ(CUR_BUF_CFG(pipe));
		skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
		return;
	}

	val = I915_READ(PLANE_CTL(pipe, plane_id));

	/* No DDB allocated for disabled planes */
	if (val & PLANE_CTL_ENABLE)
		fourcc = skl_format_to_fourcc(val & PLANE_CTL_FORMAT_MASK,
					      val & PLANE_CTL_ORDER_RGBX,
					      val & PLANE_CTL_ALPHA_MASK);

	if (INTEL_GEN(dev_priv) >= 11) {
		val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
		skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
	} else {
		val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
		val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id));

		if (fourcc &&
		    drm_format_info_is_yuv_semiplanar(drm_format_info(fourcc)))
			swap(val, val2);

		skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
		skl_ddb_entry_init_from_hw(dev_priv, ddb_uv, val2);
	}
}
void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
			       struct skl_ddb_entry *ddb_y,
			       struct skl_ddb_entry *ddb_uv)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	enum pipe pipe = crtc->pipe;
	intel_wakeref_t wakeref;
	enum plane_id plane_id;

	power_domain = POWER_DOMAIN_PIPE(pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return;

	for_each_plane_id_on_crtc(crtc, plane_id)
		skl_ddb_get_hw_plane_state(dev_priv, pipe,
					   plane_id,
					   &ddb_y[plane_id],
					   &ddb_uv[plane_id]);

	intel_display_power_put(dev_priv, power_domain, wakeref);
}
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
			  struct skl_ddb_allocation *ddb /* out */)
{
	ddb->enabled_slices = intel_enabled_dbuf_slices_num(dev_priv);
}
/*
 * Determines the downscale amount of a plane for the purposes of watermark calculations.
 * The bspec defines downscale amount as:
 *
 * Horizontal down scale amount = maximum[1, Horizontal source size /
 *                                           Horizontal destination size]
 * Vertical down scale amount = maximum[1, Vertical source size /
 *                                         Vertical destination size]
 * Total down scale amount = Horizontal down scale amount *
 *                           Vertical down scale amount
 *
 * Return value is provided in 16.16 fixed point form to retain fractional part.
 * Caller should take care of dividing & rounding off the value.
 */
static uint_fixed_16_16_t
skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state,
			   const struct intel_plane_state *plane_state)
{
	u32 src_w, src_h, dst_w, dst_h;
	uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
	uint_fixed_16_16_t downscale_h, downscale_w;

	if (WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state)))
		return u32_to_fixed16(0);

	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 *
	 * n.b., src is 16.16 fixed point, dst is whole integer.
	 */
	src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
	src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
	dst_w = drm_rect_width(&plane_state->uapi.dst);
	dst_h = drm_rect_height(&plane_state->uapi.dst);

	fp_w_ratio = div_fixed16(src_w, dst_w);
	fp_h_ratio = div_fixed16(src_h, dst_h);
	downscale_w = max_fixed16(fp_w_ratio, u32_to_fixed16(1));
	downscale_h = max_fixed16(fp_h_ratio, u32_to_fixed16(1));

	return mul_fixed16(downscale_w, downscale_h);
}
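/*
 * Worked example (illustrative): a 3840 pixel wide source scanned out at
 * 1920 pixels with no vertical scaling gives fp_w_ratio = 2.0 and
 * fp_h_ratio = 1.0, so the total downscale amount is 2.0, i.e. 0x20000
 * in 16.16 fixed point.
 */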
static u64
skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
			     const struct intel_plane_state *plane_state,
			     int color_plane)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	u32 data_rate;
	u32 width = 0, height = 0;
	uint_fixed_16_16_t down_scale_amount;
	u64 rate;

	if (!plane_state->uapi.visible)
		return 0;

	if (plane->id == PLANE_CURSOR)
		return 0;

	if (color_plane == 1 &&
	    !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
		return 0;

	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	width = drm_rect_width(&plane_state->uapi.src) >> 16;
	height = drm_rect_height(&plane_state->uapi.src) >> 16;

	/* UV plane does 1/2 pixel sub-sampling */
	if (color_plane == 1) {
		width /= 2;
		height /= 2;
	}

	data_rate = width * height;

	down_scale_amount = skl_plane_downscale_amount(crtc_state, plane_state);

	rate = mul_round_up_u32_fixed16(data_rate, down_scale_amount);

	rate *= fb->format->cpp[color_plane];
	return rate;
}
static u64
skl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
				 u64 *plane_data_rate,
				 u64 *uv_plane_data_rate)
{
	struct drm_atomic_state *state = crtc_state->uapi.state;
	struct intel_plane *plane;
	const struct intel_plane_state *plane_state;
	u64 total_data_rate = 0;

	if (WARN_ON(!state))
		return 0;

	/* Calculate and cache data rate for each plane */
	intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
		enum plane_id plane_id = plane->id;
		u64 rate;

		/* packed/y */
		rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0);
		plane_data_rate[plane_id] = rate;
		total_data_rate += rate;

		/* uv-plane */
		rate = skl_plane_relative_data_rate(crtc_state, plane_state, 1);
		uv_plane_data_rate[plane_id] = rate;
		total_data_rate += rate;
	}

	return total_data_rate;
}
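/*
 * Example of the two rates (illustrative): for NV12 (cpp[0] = 1,
 * cpp[1] = 2) the UV plane covers a quarter of the pixels but has twice
 * the cpp, so its relative data rate works out to half the Y plane
 * rate, assuming no plane scaling.
 */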
static u64
icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
				 u64 *plane_data_rate)
{
	struct intel_plane *plane;
	const struct intel_plane_state *plane_state;
	u64 total_data_rate = 0;

	if (WARN_ON(!crtc_state->uapi.state))
		return 0;

	/* Calculate and cache data rate for each plane */
	intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
		enum plane_id plane_id = plane->id;
		u64 rate;

		if (!plane_state->planar_linked_plane) {
			rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0);
			plane_data_rate[plane_id] = rate;
			total_data_rate += rate;
		} else {
			enum plane_id y_plane_id;

			/*
			 * The slave plane might not iterate in
			 * intel_atomic_crtc_state_for_each_plane_state(),
			 * and needs the master plane state which may be
			 * NULL if we try get_new_plane_state(), so we
			 * always calculate from the master.
			 */
			if (plane_state->planar_slave)
				continue;

			/* Y plane rate is calculated on the slave */
			rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0);
			y_plane_id = plane_state->planar_linked_plane->id;
			plane_data_rate[y_plane_id] = rate;
			total_data_rate += rate;

			rate = skl_plane_relative_data_rate(crtc_state, plane_state, 1);
			plane_data_rate[plane_id] = rate;
			total_data_rate += rate;
		}
	}

	return total_data_rate;
}
static int
skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
		      struct skl_ddb_allocation *ddb /* out */)
{
	struct drm_atomic_state *state = crtc_state->uapi.state;
	struct drm_crtc *crtc = crtc_state->uapi.crtc;
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct skl_ddb_entry *alloc = &crtc_state->wm.skl.ddb;
	u16 alloc_size, start = 0;
	u16 total[I915_MAX_PLANES] = {};
	u16 uv_total[I915_MAX_PLANES] = {};
	u64 total_data_rate;
	enum plane_id plane_id;
	int num_active;
	u64 plane_data_rate[I915_MAX_PLANES] = {};
	u64 uv_plane_data_rate[I915_MAX_PLANES] = {};
	u32 blocks;
	int level;

	/* Clear the partitioning for disabled planes. */
	memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
	memset(crtc_state->wm.skl.plane_ddb_uv, 0, sizeof(crtc_state->wm.skl.plane_ddb_uv));

	if (WARN_ON(!state))
		return 0;

	if (!crtc_state->hw.active) {
		alloc->start = alloc->end = 0;
		return 0;
	}

	if (INTEL_GEN(dev_priv) >= 11)
		total_data_rate =
			icl_get_total_relative_data_rate(crtc_state,
							 plane_data_rate);
	else
		total_data_rate =
			skl_get_total_relative_data_rate(crtc_state,
							 plane_data_rate,
							 uv_plane_data_rate);

	skl_ddb_get_pipe_allocation_limits(dev_priv, crtc_state, total_data_rate,
					   ddb, alloc, &num_active);
	alloc_size = skl_ddb_entry_size(alloc);
	if (alloc_size == 0)
		return 0;

	/* Allocate fixed number of blocks for cursor. */
	total[PLANE_CURSOR] = skl_cursor_allocation(crtc_state, num_active);
	alloc_size -= total[PLANE_CURSOR];
	crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].start =
		alloc->end - total[PLANE_CURSOR];
	crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end;

	if (total_data_rate == 0)
		return 0;

	/*
	 * Find the highest watermark level for which we can satisfy the block
	 * requirement of active planes.
	 */
	for (level = ilk_wm_max_level(dev_priv); level >= 0; level--) {
		blocks = 0;
		for_each_plane_id_on_crtc(intel_crtc, plane_id) {
			const struct skl_plane_wm *wm =
				&crtc_state->wm.skl.optimal.planes[plane_id];

			if (plane_id == PLANE_CURSOR) {
				if (wm->wm[level].min_ddb_alloc > total[PLANE_CURSOR]) {
					WARN_ON(wm->wm[level].min_ddb_alloc != U16_MAX);
					blocks = U32_MAX;
					break;
				}
				continue;
			}

			blocks += wm->wm[level].min_ddb_alloc;
			blocks += wm->uv_wm[level].min_ddb_alloc;
		}

		if (blocks <= alloc_size) {
			alloc_size -= blocks;
			break;
		}
	}

	if (level < 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Requested display configuration exceeds system DDB limitations");
		drm_dbg_kms(&dev_priv->drm, "minimum required %d/%d\n",
			    blocks, alloc_size);
		return -EINVAL;
	}

	/*
	 * Grant each plane the blocks it requires at the highest achievable
	 * watermark level, plus an extra share of the leftover blocks
	 * proportional to its relative data rate.
	 */
	for_each_plane_id_on_crtc(intel_crtc, plane_id) {
		const struct skl_plane_wm *wm =
			&crtc_state->wm.skl.optimal.planes[plane_id];
		u64 rate;
		u16 extra;

		if (plane_id == PLANE_CURSOR)
			continue;

		/*
		 * We've accounted for all active planes; remaining planes are
		 * all disabled.
		 */
		if (total_data_rate == 0)
			break;

		rate = plane_data_rate[plane_id];
		extra = min_t(u16, alloc_size,
			      DIV64_U64_ROUND_UP(alloc_size * rate,
						 total_data_rate));
		total[plane_id] = wm->wm[level].min_ddb_alloc + extra;
		alloc_size -= extra;
		total_data_rate -= rate;

		if (total_data_rate == 0)
			break;

		rate = uv_plane_data_rate[plane_id];
		extra = min_t(u16, alloc_size,
			      DIV64_U64_ROUND_UP(alloc_size * rate,
						 total_data_rate));
		uv_total[plane_id] = wm->uv_wm[level].min_ddb_alloc + extra;
		alloc_size -= extra;
		total_data_rate -= rate;
	}
	WARN_ON(alloc_size != 0 || total_data_rate != 0);

	/* Set the actual DDB start/end points for each plane */
	start = alloc->start;
	for_each_plane_id_on_crtc(intel_crtc, plane_id) {
		struct skl_ddb_entry *plane_alloc =
			&crtc_state->wm.skl.plane_ddb_y[plane_id];
		struct skl_ddb_entry *uv_plane_alloc =
			&crtc_state->wm.skl.plane_ddb_uv[plane_id];

		if (plane_id == PLANE_CURSOR)
			continue;

		/* Gen11+ uses a separate plane for UV watermarks */
		WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_total[plane_id]);

		/* Leave disabled planes at (0,0) */
		if (total[plane_id]) {
			plane_alloc->start = start;
			start += total[plane_id];
			plane_alloc->end = start;
		}

		if (uv_total[plane_id]) {
			uv_plane_alloc->start = start;
			start += uv_total[plane_id];
			uv_plane_alloc->end = start;
		}
	}

	/*
	 * When we calculated watermark values we didn't know how high
	 * of a level we'd actually be able to hit, so we just marked
	 * all levels as "enabled."  Go back now and disable the ones
	 * that aren't actually possible.
	 */
	for (level++; level <= ilk_wm_max_level(dev_priv); level++) {
		for_each_plane_id_on_crtc(intel_crtc, plane_id) {
			struct skl_plane_wm *wm =
				&crtc_state->wm.skl.optimal.planes[plane_id];

			/*
			 * We only disable the watermarks for each plane if
			 * they exceed the ddb allocation of said plane. This
			 * is done so that we don't end up touching cursor
			 * watermarks needlessly when some other plane reduces
			 * our max possible watermark level.
			 *
			 * Bspec has this to say about the PLANE_WM enable bit:
			 * "All the watermarks at this level for all enabled
			 *  planes must be enabled before the level will be used."
			 * So this is actually safe to do.
			 */
			if (wm->wm[level].min_ddb_alloc > total[plane_id] ||
			    wm->uv_wm[level].min_ddb_alloc > uv_total[plane_id])
				memset(&wm->wm[level], 0, sizeof(wm->wm[level]));

			/*
			 * Wa_1408961008:icl, ehl
			 * Underruns with WM1+ disabled
			 */
			if (IS_GEN(dev_priv, 11) &&
			    level == 1 && wm->wm[0].plane_en) {
				wm->wm[level].plane_res_b = wm->wm[0].plane_res_b;
				wm->wm[level].plane_res_l = wm->wm[0].plane_res_l;
				wm->wm[level].ignore_lines = wm->wm[0].ignore_lines;
			}
		}
	}

	/*
	 * Go back and disable the transition watermark if it turns out we
	 * don't have enough DDB blocks for it.
	 */
	for_each_plane_id_on_crtc(intel_crtc, plane_id) {
		struct skl_plane_wm *wm =
			&crtc_state->wm.skl.optimal.planes[plane_id];

		if (wm->trans_wm.plane_res_b >= total[plane_id])
			memset(&wm->trans_wm, 0, sizeof(wm->trans_wm));
	}

	return 0;
}
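/*
 * Worked example for the proportional "extra" distribution above
 * (illustrative numbers): with alloc_size = 400 blocks left over and a
 * plane contributing 30 of a total_data_rate of 100, that plane gets
 * extra = DIV64_U64_ROUND_UP(400 * 30, 100) = 120 blocks on top of its
 * min_ddb_alloc. Reducing alloc_size and total_data_rate as we go keeps
 * the rounded-up shares from oversubscribing the DDB.
 */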
/*
 * The max latency should be 257 (max the punit can code is 255 and we add 2us
 * for the read latency) and cpp should always be <= 8, so that
 * should allow pixel_rate up to ~2 GHz which seems sufficient since max
 * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
 */
static uint_fixed_16_16_t
skl_wm_method1(const struct drm_i915_private *dev_priv, u32 pixel_rate,
	       u8 cpp, u32 latency, u32 dbuf_block_size)
{
	u32 wm_intermediate_val;
	uint_fixed_16_16_t ret;

	if (latency == 0)
		return FP_16_16_MAX;

	wm_intermediate_val = latency * pixel_rate * cpp;
	ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size);

	if (INTEL_GEN(dev_priv) >= 10)
		ret = add_fixed16_u32(ret, 1);

	return ret;
}

static uint_fixed_16_16_t
skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
	       uint_fixed_16_16_t plane_blocks_per_line)
{
	u32 wm_intermediate_val;
	uint_fixed_16_16_t ret;

	if (latency == 0)
		return FP_16_16_MAX;

	wm_intermediate_val = latency * pixel_rate;
	wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val,
					   pipe_htotal * 1000);
	ret = mul_u32_fixed16(wm_intermediate_val, plane_blocks_per_line);
	return ret;
}
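/*
 * Worked example (illustrative, assuming pixel_rate is in kHz as
 * elsewhere in this file): for a 148.5MHz pixel rate (148500), cpp = 4,
 * latency = 5us and a 512 byte block, method1 gives
 * 5 * 148500 * 4 / (1000 * 512) = ~5.8 blocks, i.e. the data fetched
 * during the latency window. method2 instead rounds the latency up to
 * whole scanlines (htotal = 2200 -> DIV_ROUND_UP(5 * 148500, 2200 * 1000)
 * = 1 line) and multiplies by plane_blocks_per_line.
 */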
static uint_fixed_16_16_t
intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
{
	u32 pixel_rate;
	u32 crtc_htotal;
	uint_fixed_16_16_t linetime_us;

	if (!crtc_state->hw.active)
		return u32_to_fixed16(0);

	pixel_rate = crtc_state->pixel_rate;

	if (WARN_ON(pixel_rate == 0))
		return u32_to_fixed16(0);

	crtc_htotal = crtc_state->hw.adjusted_mode.crtc_htotal;
	linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate);

	return linetime_us;
}
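/*
 * Worked example (illustrative): htotal = 2200 at a 148.5MHz pixel clock
 * (pixel_rate = 148500 in kHz) gives 2200 * 1000 / 148500 = ~14.8us per
 * scanline.
 */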
static u32
skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{
	u64 adjusted_pixel_rate;
	uint_fixed_16_16_t downscale_amount;

	/* Shouldn't reach here on disabled planes... */
	if (WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state)))
		return 0;

	/*
	 * Adjusted plane pixel rate is just the pipe's adjusted pixel rate
	 * with additional adjustments for plane-specific scaling.
	 */
	adjusted_pixel_rate = crtc_state->pixel_rate;
	downscale_amount = skl_plane_downscale_amount(crtc_state, plane_state);

	return mul_round_up_u32_fixed16(adjusted_pixel_rate,
					downscale_amount);
}
static int
skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
		      int width, const struct drm_format_info *format,
		      u64 modifier, unsigned int rotation,
		      u32 plane_pixel_rate, struct skl_wm_params *wp,
		      int color_plane)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 interm_pbpl;

	/* only planar format has two planes */
	if (color_plane == 1 &&
	    !intel_format_info_is_yuv_semiplanar(format, modifier)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Non-planar format has a single plane\n");
		return -EINVAL;
	}

	wp->y_tiled = modifier == I915_FORMAT_MOD_Y_TILED ||
		      modifier == I915_FORMAT_MOD_Yf_TILED ||
		      modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
		      modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
	wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
	wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
			 modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
	wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier);

	wp->width = width;
	if (color_plane == 1 && wp->is_planar)
		wp->width /= 2;

	wp->cpp = format->cpp[color_plane];
	wp->plane_pixel_rate = plane_pixel_rate;

	if (INTEL_GEN(dev_priv) >= 11 &&
	    modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1)
		wp->dbuf_block_size = 256;
	else
		wp->dbuf_block_size = 512;

	if (drm_rotation_90_or_270(rotation)) {
		switch (wp->cpp) {
		case 1:
			wp->y_min_scanlines = 16;
			break;
		case 2:
			wp->y_min_scanlines = 8;
			break;
		case 4:
			wp->y_min_scanlines = 4;
			break;
		default:
			MISSING_CASE(wp->cpp);
			return -EINVAL;
		}
	} else {
		wp->y_min_scanlines = 4;
	}

	if (skl_needs_memory_bw_wa(dev_priv))
		wp->y_min_scanlines *= 2;

	wp->plane_bytes_per_line = wp->width * wp->cpp;
	if (wp->y_tiled) {
		interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line *
					   wp->y_min_scanlines,
					   wp->dbuf_block_size);

		if (INTEL_GEN(dev_priv) >= 10)
			interm_pbpl++;

		wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
							wp->y_min_scanlines);
	} else if (wp->x_tiled && IS_GEN(dev_priv, 9)) {
		interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
					   wp->dbuf_block_size);
		wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
	} else {
		interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
					   wp->dbuf_block_size) + 1;
		wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
	}

	wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines,
					     wp->plane_blocks_per_line);

	wp->linetime_us = fixed16_to_u32_round_up(
					intel_get_linetime_us(crtc_state));

	return 0;
}
static int
skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
			    const struct intel_plane_state *plane_state,
			    struct skl_wm_params *wp, int color_plane)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int width;

	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	width = drm_rect_width(&plane_state->uapi.src) >> 16;

	return skl_compute_wm_params(crtc_state, width,
				     fb->format, fb->modifier,
				     plane_state->hw.rotation,
				     skl_adjusted_plane_pixel_rate(crtc_state, plane_state),
				     wp, color_plane);
}

static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level)
{
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		return true;

	/* The number of lines are ignored for the level 0 watermark. */
	return level > 0;
}
static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
				 int level,
				 const struct skl_wm_params *wp,
				 const struct skl_wm_level *result_prev,
				 struct skl_wm_level *result /* out */)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	u32 latency = dev_priv->wm.skl_latency[level];
	uint_fixed_16_16_t method1, method2;
	uint_fixed_16_16_t selected_result;
	u32 res_blocks, res_lines, min_ddb_alloc = 0;

	if (latency == 0) {
		/* reject it */
		result->min_ddb_alloc = U16_MAX;
		return;
	}

	/*
	 * WaIncreaseLatencyIPCEnabled: kbl,cfl
	 * Display WA #1141: kbl,cfl
	 */
	if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) ||
	    dev_priv->ipc_enabled)
		latency += 4;

	if (skl_needs_memory_bw_wa(dev_priv) && wp->x_tiled)
		latency += 15;

	method1 = skl_wm_method1(dev_priv, wp->plane_pixel_rate,
				 wp->cpp, latency, wp->dbuf_block_size);
	method2 = skl_wm_method2(wp->plane_pixel_rate,
				 crtc_state->hw.adjusted_mode.crtc_htotal,
				 latency,
				 wp->plane_blocks_per_line);

	if (wp->y_tiled) {
		selected_result = max_fixed16(method2, wp->y_tile_minimum);
	} else {
		if ((wp->cpp * crtc_state->hw.adjusted_mode.crtc_htotal /
		     wp->dbuf_block_size < 1) &&
		    (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
			selected_result = method2;
		} else if (latency >= wp->linetime_us) {
			if (IS_GEN(dev_priv, 9) &&
			    !IS_GEMINILAKE(dev_priv))
				selected_result = min_fixed16(method1, method2);
			else
				selected_result = method2;
		} else {
			selected_result = method1;
		}
	}

	res_blocks = fixed16_to_u32_round_up(selected_result) + 1;
	res_lines = div_round_up_fixed16(selected_result,
					 wp->plane_blocks_per_line);

	if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) {
		/* Display WA #1125: skl,bxt,kbl */
		if (level == 0 && wp->rc_surface)
			res_blocks +=
				fixed16_to_u32_round_up(wp->y_tile_minimum);

		/* Display WA #1126: skl,bxt,kbl */
		if (level >= 1 && level <= 7) {
			if (wp->y_tiled) {
				res_blocks +=
					fixed16_to_u32_round_up(wp->y_tile_minimum);
				res_lines += wp->y_min_scanlines;
			} else {
				res_blocks++;
			}

			/*
			 * Make sure result blocks for higher latency levels are
			 * at least as high as the level below the current level.
			 * Assumption in DDB algorithm optimization for special
			 * cases. Also covers Display WA #1125 for RC.
			 */
			if (result_prev->plane_res_b > res_blocks)
				res_blocks = result_prev->plane_res_b;
		}
	}

	if (INTEL_GEN(dev_priv) >= 11) {
		if (wp->y_tiled) {
			int extra_lines;

			if (res_lines % wp->y_min_scanlines == 0)
				extra_lines = wp->y_min_scanlines;
			else
				extra_lines = wp->y_min_scanlines * 2 -
					res_lines % wp->y_min_scanlines;

			min_ddb_alloc = mul_round_up_u32_fixed16(res_lines + extra_lines,
								 wp->plane_blocks_per_line);
		} else {
			min_ddb_alloc = res_blocks +
				DIV_ROUND_UP(res_blocks, 10);
		}
	}

	if (!skl_wm_has_lines(dev_priv, level))
		res_lines = 0;

	if (res_lines > 31) {
		/* reject it */
		result->min_ddb_alloc = U16_MAX;
		return;
	}

	/*
	 * If res_lines is valid, assume we can use this watermark level
	 * for now.  We'll come back and disable it after we calculate the
	 * DDB allocation if it turns out we don't actually have enough
	 * blocks to satisfy it.
	 */
	result->plane_res_b = res_blocks;
	result->plane_res_l = res_lines;
	/* Bspec says: value >= plane ddb allocation -> invalid, hence the +1 here */
	result->min_ddb_alloc = max(min_ddb_alloc, res_blocks) + 1;
	result->plane_en = true;
}
static void
skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
		      const struct skl_wm_params *wm_params,
		      struct skl_wm_level *levels)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	int level, max_level = ilk_wm_max_level(dev_priv);
	struct skl_wm_level *result_prev = &levels[0];

	for (level = 0; level <= max_level; level++) {
		struct skl_wm_level *result = &levels[level];

		skl_compute_plane_wm(crtc_state, level, wm_params,
				     result_prev, result);

		result_prev = result;
	}
}
static u32
skl_compute_linetime_wm(const struct intel_crtc_state *crtc_state)
{
	struct drm_atomic_state *state = crtc_state->uapi.state;
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	uint_fixed_16_16_t linetime_us;
	u32 linetime_wm;

	linetime_us = intel_get_linetime_us(crtc_state);
	linetime_wm = fixed16_to_u32_round_up(mul_u32_fixed16(8, linetime_us));

	/* Display WA #1135: BXT:ALL GLK:ALL */
	if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled)
		linetime_wm /= 2;

	return linetime_wm;
}
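/*
 * Worked example (illustrative): with a ~14.8us linetime the 8x
 * multiplier above yields fixed16_to_u32_round_up(8 * 14.8) = 119,
 * halved to 59 when the BXT/GLK IPC WA applies.
 */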
static void skl_compute_transition_wm(const struct intel_crtc_state *crtc_state,
				      const struct skl_wm_params *wp,
				      struct skl_plane_wm *wm)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	const struct drm_i915_private *dev_priv = to_i915(dev);
	u16 trans_min, trans_y_tile_min;
	const u16 trans_amount = 10; /* This is a configurable amount */
	u16 wm0_sel_res_b, trans_offset_b, res_blocks;

	/* Transition WMs are not recommended by the HW team for GEN9 */
	if (INTEL_GEN(dev_priv) <= 9)
		return;

	/* Transition WMs don't make any sense if ipc is disabled */
	if (!dev_priv->ipc_enabled)
		return;

	trans_min = 14;
	if (INTEL_GEN(dev_priv) >= 11)
		trans_min = 4;

	trans_offset_b = trans_min + trans_amount;

	/*
	 * The spec asks for Selected Result Blocks for wm0 (the real value),
	 * not Result Blocks (the integer value). Pay attention to the capital
	 * letters. The value wm_l0->plane_res_b is actually Result Blocks, but
	 * since Result Blocks is the ceiling of Selected Result Blocks plus 1,
	 * and since we later will have to get the ceiling of the sum in the
	 * transition watermarks calculation, we can just pretend Selected
	 * Result Blocks is Result Blocks minus 1 and it should work for the
	 * current platforms.
	 */
	wm0_sel_res_b = wm->wm[0].plane_res_b - 1;

	if (wp->y_tiled) {
		trans_y_tile_min =
			(u16)mul_round_up_u32_fixed16(2, wp->y_tile_minimum);
		res_blocks = max(wm0_sel_res_b, trans_y_tile_min) +
			     trans_offset_b;
	} else {
		res_blocks = wm0_sel_res_b + trans_offset_b;

		/* WA BUG:1938466 add one block for non y-tile planes */
		if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))
			res_blocks += 1;
	}

	/*
	 * Just assume we can enable the transition watermark.  After
	 * computing the DDB we'll come back and disable it if that
	 * assumption turns out to be false.
	 */
	wm->trans_wm.plane_res_b = res_blocks + 1;
	wm->trans_wm.plane_en = true;
}
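/*
 * Worked example (illustrative): on gen11 trans_min = 4, so
 * trans_offset_b = 4 + 10 = 14. For a non y-tiled plane with
 * wm[0].plane_res_b = 20, wm0_sel_res_b = 19 and res_blocks = 33, so
 * trans_wm.plane_res_b is programmed as 34.
 */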
static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
				     const struct intel_plane_state *plane_state,
				     enum plane_id plane_id, int color_plane)
{
	struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
	struct skl_wm_params wm_params;
	int ret;

	ret = skl_compute_plane_wm_params(crtc_state, plane_state,
					  &wm_params, color_plane);
	if (ret)
		return ret;

	skl_compute_wm_levels(crtc_state, &wm_params, wm->wm);
	skl_compute_transition_wm(crtc_state, &wm_params, wm);

	return 0;
}
static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
				 const struct intel_plane_state *plane_state,
				 enum plane_id plane_id)
{
	struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
	struct skl_wm_params wm_params;
	int ret;

	wm->is_planar = true;

	/* uv plane watermarks must also be validated for NV12/Planar */
	ret = skl_compute_plane_wm_params(crtc_state, plane_state,
					  &wm_params, 1);
	if (ret)
		return ret;

	skl_compute_wm_levels(crtc_state, &wm_params, wm->uv_wm);

	return 0;
}
static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	enum plane_id plane_id = plane->id;
	int ret;

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	ret = skl_build_plane_wm_single(crtc_state, plane_state,
					plane_id, 0);
	if (ret)
		return ret;

	if (fb->format->is_yuv && fb->format->num_planes > 1) {
		ret = skl_build_plane_wm_uv(crtc_state, plane_state,
					    plane_id);
		if (ret)
			return ret;
	}

	return 0;
}
static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{
	enum plane_id plane_id = to_intel_plane(plane_state->uapi.plane)->id;
	int ret;

	/* Watermarks calculated in master */
	if (plane_state->planar_slave)
		return 0;

	if (plane_state->planar_linked_plane) {
		const struct drm_framebuffer *fb = plane_state->hw.fb;
		enum plane_id y_plane_id = plane_state->planar_linked_plane->id;

		WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state));
		WARN_ON(!fb->format->is_yuv ||
			fb->format->num_planes == 1);

		ret = skl_build_plane_wm_single(crtc_state, plane_state,
						y_plane_id, 0);
		if (ret)
			return ret;

		ret = skl_build_plane_wm_single(crtc_state, plane_state,
						plane_id, 1);
		if (ret)
			return ret;
	} else if (intel_wm_plane_visible(crtc_state, plane_state)) {
		ret = skl_build_plane_wm_single(crtc_state, plane_state,
						plane_id, 0);
		if (ret)
			return ret;
	}

	return 0;
}
static int skl_build_pipe_wm(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
	struct intel_plane *plane;
	const struct intel_plane_state *plane_state;
	int ret;

	/*
	 * We'll only calculate watermarks for planes that are actually
	 * enabled, so make sure all other planes are set as disabled.
	 */
	memset(pipe_wm->planes, 0, sizeof(pipe_wm->planes));

	intel_atomic_crtc_state_for_each_plane_state(plane, plane_state,
						     crtc_state) {
		if (INTEL_GEN(dev_priv) >= 11)
			ret = icl_build_plane_wm(crtc_state, plane_state);
		else
			ret = skl_build_plane_wm(crtc_state, plane_state);
		if (ret)
			return ret;
	}

	pipe_wm->linetime = skl_compute_linetime_wm(crtc_state);

	return 0;
}
static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
				i915_reg_t reg,
				const struct skl_ddb_entry *entry)
{
	if (entry->end)
		I915_WRITE_FW(reg, (entry->end - 1) << 16 | entry->start);
	else
		I915_WRITE_FW(reg, 0);
}
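/*
 * Note the encoding is the inverse of skl_ddb_entry_init_from_hw():
 * the register stores an inclusive end (end - 1), which the readout
 * path converts back to our exclusive 'end' by adding 1.
 */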
static void skl_write_wm_level(struct drm_i915_private *dev_priv,
			       i915_reg_t reg,
			       const struct skl_wm_level *level)
{
	u32 val = 0;

	if (level->plane_en)
		val |= PLANE_WM_EN;
	if (level->ignore_lines)
		val |= PLANE_WM_IGNORE_LINES;
	val |= level->plane_res_b;
	val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;

	I915_WRITE_FW(reg, val);
}
void skl_write_plane_wm(struct intel_plane *plane,
			const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	int level, max_level = ilk_wm_max_level(dev_priv);
	enum plane_id plane_id = plane->id;
	enum pipe pipe = plane->pipe;
	const struct skl_plane_wm *wm =
		&crtc_state->wm.skl.optimal.planes[plane_id];
	const struct skl_ddb_entry *ddb_y =
		&crtc_state->wm.skl.plane_ddb_y[plane_id];
	const struct skl_ddb_entry *ddb_uv =
		&crtc_state->wm.skl.plane_ddb_uv[plane_id];

	for (level = 0; level <= max_level; level++) {
		skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
				   &wm->wm[level]);
	}
	skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
			   &wm->trans_wm);

	if (INTEL_GEN(dev_priv) >= 11) {
		skl_ddb_entry_write(dev_priv,
				    PLANE_BUF_CFG(pipe, plane_id), ddb_y);
		return;
	}

	if (wm->is_planar)
		swap(ddb_y, ddb_uv);

	skl_ddb_entry_write(dev_priv,
			    PLANE_BUF_CFG(pipe, plane_id), ddb_y);
	skl_ddb_entry_write(dev_priv,
			    PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_uv);
}
void skl_write_cursor_wm(struct intel_plane *plane,
			 const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	int level, max_level = ilk_wm_max_level(dev_priv);
	enum plane_id plane_id = plane->id;
	enum pipe pipe = plane->pipe;
	const struct skl_plane_wm *wm =
		&crtc_state->wm.skl.optimal.planes[plane_id];
	const struct skl_ddb_entry *ddb =
		&crtc_state->wm.skl.plane_ddb_y[plane_id];

	for (level = 0; level <= max_level; level++) {
		skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
				   &wm->wm[level]);
	}
	skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm);

	skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), ddb);
}
bool skl_wm_level_equals(const struct skl_wm_level *l1,
			 const struct skl_wm_level *l2)
{
	return l1->plane_en == l2->plane_en &&
		l1->ignore_lines == l2->ignore_lines &&
		l1->plane_res_l == l2->plane_res_l &&
		l1->plane_res_b == l2->plane_res_b;
}
static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv,
				const struct skl_plane_wm *wm1,
				const struct skl_plane_wm *wm2)
{
	int level, max_level = ilk_wm_max_level(dev_priv);

	for (level = 0; level <= max_level; level++) {
		if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]) ||
		    !skl_wm_level_equals(&wm1->uv_wm[level], &wm2->uv_wm[level]))
			return false;
	}

	return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm);
}
static bool skl_pipe_wm_equals(struct intel_crtc *crtc,
			       const struct skl_pipe_wm *wm1,
			       const struct skl_pipe_wm *wm2)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum plane_id plane_id;

	for_each_plane_id_on_crtc(crtc, plane_id) {
		if (!skl_plane_wm_equals(dev_priv,
					 &wm1->planes[plane_id],
					 &wm2->planes[plane_id]))
			return false;
	}

	return wm1->linetime == wm2->linetime;
}
static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
					   const struct skl_ddb_entry *b)
{
	return a->start < b->end && b->start < a->end;
}
bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
				 const struct skl_ddb_entry *entries,
				 int num_entries, int ignore_idx)
{
	int i;

	for (i = 0; i < num_entries; i++) {
		if (i != ignore_idx &&
		    skl_ddb_entries_overlap(ddb, &entries[i]))
			return true;
	}

	return false;
}
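/*
 * The entries are half-open [start, end), so e.g. (0, 100) and
 * (100, 200) do not overlap: adjacent allocations sharing a boundary
 * are allowed.
 */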
static int
skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
			    struct intel_crtc_state *new_crtc_state)
{
	struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->uapi.state);
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		struct intel_plane_state *plane_state;
		enum plane_id plane_id = plane->id;

		if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id],
					&new_crtc_state->wm.skl.plane_ddb_y[plane_id]) &&
		    skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_uv[plane_id],
					&new_crtc_state->wm.skl.plane_ddb_uv[plane_id]))
			continue;

		plane_state = intel_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);

		new_crtc_state->update_planes |= BIT(plane_id);
	}

	return 0;
}
static int
skl_compute_ddb(struct intel_atomic_state *state)
{
	const struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
	struct intel_crtc_state *old_crtc_state;
	struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int ret, i;

	memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		ret = skl_allocate_pipe_ddb(new_crtc_state, ddb);
		if (ret)
			return ret;

		ret = skl_ddb_add_affected_planes(old_crtc_state,
						  new_crtc_state);
		if (ret)
			return ret;
	}

	return 0;
}
static char enast(bool enable)
{
	return enable ? '*' : ' ';
}
static void
skl_print_wm_changes(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state;
	const struct intel_crtc_state *new_crtc_state;
	struct intel_plane *plane;
	struct intel_crtc *crtc;
	int i;

	if (!drm_debug_enabled(DRM_UT_KMS))
		return;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		const struct skl_pipe_wm *old_pipe_wm, *new_pipe_wm;

		old_pipe_wm = &old_crtc_state->wm.skl.optimal;
		new_pipe_wm = &new_crtc_state->wm.skl.optimal;

		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
			enum plane_id plane_id = plane->id;
			const struct skl_ddb_entry *old, *new;

			old = &old_crtc_state->wm.skl.plane_ddb_y[plane_id];
			new = &new_crtc_state->wm.skl.plane_ddb_y[plane_id];

			if (skl_ddb_entry_equal(old, new))
				continue;

			drm_dbg_kms(&dev_priv->drm,
				    "[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
				    plane->base.base.id, plane->base.name,
				    old->start, old->end, new->start, new->end,
				    skl_ddb_entry_size(old), skl_ddb_entry_size(new));
		}

		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
			enum plane_id plane_id = plane->id;
			const struct skl_plane_wm *old_wm, *new_wm;

			old_wm = &old_pipe_wm->planes[plane_id];
			new_wm = &new_pipe_wm->planes[plane_id];

			if (skl_plane_wm_equals(dev_priv, old_wm, new_wm))
				continue;

			drm_dbg_kms(&dev_priv->drm,
				    "[PLANE:%d:%s]   level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm"
				    " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm\n",
				    plane->base.base.id, plane->base.name,
				    enast(old_wm->wm[0].plane_en), enast(old_wm->wm[1].plane_en),
				    enast(old_wm->wm[2].plane_en), enast(old_wm->wm[3].plane_en),
				    enast(old_wm->wm[4].plane_en), enast(old_wm->wm[5].plane_en),
				    enast(old_wm->wm[6].plane_en), enast(old_wm->wm[7].plane_en),
				    enast(old_wm->trans_wm.plane_en),
				    enast(new_wm->wm[0].plane_en), enast(new_wm->wm[1].plane_en),
				    enast(new_wm->wm[2].plane_en), enast(new_wm->wm[3].plane_en),
				    enast(new_wm->wm[4].plane_en), enast(new_wm->wm[5].plane_en),
				    enast(new_wm->wm[6].plane_en), enast(new_wm->wm[7].plane_en),
				    enast(new_wm->trans_wm.plane_en));

			drm_dbg_kms(&dev_priv->drm,
				    "[PLANE:%d:%s]   lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d"
				    " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n",
				    plane->base.base.id, plane->base.name,
				    enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].plane_res_l,
				    enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].plane_res_l,
				    enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l,
				    enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l,
				    enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l,
				    enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l,
				    enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l,
				    enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l,
				    enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.plane_res_l,
				    enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].plane_res_l,
				    enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].plane_res_l,
				    enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].plane_res_l,
				    enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].plane_res_l,
				    enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].plane_res_l,
				    enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].plane_res_l,
				    enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].plane_res_l,
				    enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].plane_res_l,
				    enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.plane_res_l);

			drm_dbg_kms(&dev_priv->drm,
				    "[PLANE:%d:%s]  blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
				    " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
				    plane->base.base.id, plane->base.name,
				    old_wm->wm[0].plane_res_b, old_wm->wm[1].plane_res_b,
				    old_wm->wm[2].plane_res_b, old_wm->wm[3].plane_res_b,
				    old_wm->wm[4].plane_res_b, old_wm->wm[5].plane_res_b,
				    old_wm->wm[6].plane_res_b, old_wm->wm[7].plane_res_b,
				    old_wm->trans_wm.plane_res_b,
				    new_wm->wm[0].plane_res_b, new_wm->wm[1].plane_res_b,
				    new_wm->wm[2].plane_res_b, new_wm->wm[3].plane_res_b,
				    new_wm->wm[4].plane_res_b, new_wm->wm[5].plane_res_b,
				    new_wm->wm[6].plane_res_b, new_wm->wm[7].plane_res_b,
				    new_wm->trans_wm.plane_res_b);

			drm_dbg_kms(&dev_priv->drm,
				    "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
				    " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
				    plane->base.base.id, plane->base.name,
				    old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
				    old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
				    old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
				    old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
				    old_wm->trans_wm.min_ddb_alloc,
				    new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
				    new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
				    new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
				    new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
				    new_wm->trans_wm.min_ddb_alloc);
		}
	}
}
static int intel_add_all_pipes(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);
	}

	return 0;
}
static int
skl_ddb_add_affected_pipes(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	int ret;

	/*
	 * If this is our first atomic update following hardware readout,
	 * we can't trust the DDB that the BIOS programmed for us.  Let's
	 * pretend that all pipes switched active status so that we'll
	 * ensure a full DDB recompute.
	 */
	if (dev_priv->wm.distrust_bios_wm) {
		ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
				       state->base.acquire_ctx);
		if (ret)
			return ret;

		state->active_pipe_changes = INTEL_INFO(dev_priv)->pipe_mask;

		/*
		 * We usually only initialize state->active_pipes if we're
		 * doing a modeset; make sure this field is always
		 * initialized during the sanitization process that happens
		 * on the first commit too.
		 */
		if (!state->modeset)
			state->active_pipes = dev_priv->active_pipes;
	}

	/*
	 * If the modeset changes which CRTC's are active, we need to
	 * recompute the DDB allocation for *all* active pipes, even
	 * those that weren't otherwise being modified in any way by this
	 * atomic commit.  Due to the shrinking of the per-pipe allocations
	 * when new active CRTC's are added, it's possible for a pipe that
	 * we were already using and aren't changing at all here to suddenly
	 * become invalid if its DDB needs exceed its new allocation.
	 *
	 * Note that if we wind up doing a full DDB recompute, we can't let
	 * any other display updates race with this transaction, so we need
	 * to grab the lock on *all* CRTC's.
	 */
	if (state->active_pipe_changes || state->modeset) {
		state->wm_results.dirty_pipes = INTEL_INFO(dev_priv)->pipe_mask;

		ret = intel_add_all_pipes(state);
		if (ret)
			return ret;
	}

	return 0;
}
/*
 * To make sure the cursor watermark registers are always consistent
 * with our computed state the following scenario needs special
 * treatment:
 *
 * 1. enable cursor
 * 2. move cursor entirely offscreen
 * 3. disable cursor
 *
 * Step 2. does call .disable_plane() but does not zero the watermarks
 * (since we consider an offscreen cursor still active for the purposes
 * of watermarks). Step 3. would not normally call .disable_plane()
 * because the actual plane visibility isn't changing, and we don't
 * deallocate the cursor ddb until the pipe gets disabled. So we must
 * force step 3. to call .disable_plane() to update the watermark
 * registers properly.
 *
 * Other planes do not suffer from this issue as their watermarks are
 * calculated based on the actual plane visibility. The only time this
 * can trigger for the other planes is during the initial readout as the
 * default value of the watermarks registers is not zero.
 */
static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		struct intel_plane_state *plane_state;
		enum plane_id plane_id = plane->id;

		/*
		 * Force a full wm update for every plane on modeset.
		 * Required because the reset value of the wm registers
		 * is non-zero, whereas we want all disabled planes to
		 * have zero watermarks. So if we turn off the relevant
		 * power well the hardware state will go out of sync
		 * with the software state.
		 */
		if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi) &&
		    skl_plane_wm_equals(dev_priv,
					&old_crtc_state->wm.skl.optimal.planes[plane_id],
					&new_crtc_state->wm.skl.optimal.planes[plane_id]))
			continue;

		plane_state = intel_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);

		new_crtc_state->update_planes |= BIT(plane_id);
	}

	return 0;
}
static int
skl_compute_wm(struct intel_atomic_state *state)
{
	struct intel_crtc *crtc;
	struct intel_crtc_state *new_crtc_state;
	struct intel_crtc_state *old_crtc_state;
	struct skl_ddb_values *results = &state->wm_results;
	int ret, i;

	/* Clear all dirty flags */
	results->dirty_pipes = 0;

	ret = skl_ddb_add_affected_pipes(state);
	if (ret)
		return ret;

	/*
	 * Calculate WM's for all pipes that are part of this transaction.
	 * Note that skl_ddb_add_affected_pipes may have added more CRTC's that
	 * weren't otherwise being modified (and set bits in dirty_pipes) if
	 * pipe allocations had to change.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		ret = skl_build_pipe_wm(new_crtc_state);
		if (ret)
			return ret;

		ret = skl_wm_add_affected_planes(state, crtc);
		if (ret)
			return ret;

		if (!skl_pipe_wm_equals(crtc,
					&old_crtc_state->wm.skl.optimal,
					&new_crtc_state->wm.skl.optimal))
			results->dirty_pipes |= BIT(crtc->pipe);
	}

	ret = skl_compute_ddb(state);
	if (ret)
		return ret;

	skl_print_wm_changes(state);

	return 0;
}
static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
	enum pipe pipe = crtc->pipe;

	if ((state->wm_results.dirty_pipes & BIT(crtc->pipe)) == 0)
		return;

	I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime);
}
static void skl_initial_wm(struct intel_atomic_state *state,
			   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct skl_ddb_values *results = &state->wm_results;

	if ((results->dirty_pipes & BIT(crtc->pipe)) == 0)
		return;

	mutex_lock(&dev_priv->wm.wm_mutex);

	if (crtc_state->uapi.active_changed)
		skl_atomic_update_crtc_wm(state, crtc);

	mutex_unlock(&dev_priv->wm.wm_mutex);
}
static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
				  struct intel_wm_config *config)
{
	struct intel_crtc *crtc;

	/* Compute the currently _active_ config */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;

		if (!wm->pipe_enabled)
			continue;

		config->sprites_enabled |= wm->sprites_enabled;
		config->sprites_scaled |= wm->sprites_scaled;
		config->num_pipes_active++;
	}
}
static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
{
	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
	struct ilk_wm_maximums max;
	struct intel_wm_config config = {};
	struct ilk_wm_values results = {};
	enum intel_ddb_partitioning partitioning;

	ilk_compute_wm_config(dev_priv, &config);

	ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_1_2, &max);
	ilk_wm_merge(dev_priv, &config, &max, &lp_wm_1_2);

	/* 5/6 split only in single pipe config on IVB+ */
	if (INTEL_GEN(dev_priv) >= 7 &&
	    config.num_pipes_active == 1 && config.sprites_enabled) {
		ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_5_6, &max);
		ilk_wm_merge(dev_priv, &config, &max, &lp_wm_5_6);

		best_lp_wm = ilk_find_best_result(dev_priv, &lp_wm_1_2, &lp_wm_5_6);
	} else {
		best_lp_wm = &lp_wm_1_2;
	}

	partitioning = (best_lp_wm == &lp_wm_1_2) ?
		       INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;

	ilk_compute_wm_results(dev_priv, best_lp_wm, partitioning, &results);

	ilk_write_wm_values(dev_priv, &results);
}
static void ilk_initial_watermarks(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	mutex_lock(&dev_priv->wm.wm_mutex);
	crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate;
	ilk_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->wm.wm_mutex);
}
static void ilk_optimize_watermarks(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!crtc_state->wm.need_postvbl_update)
		return;

	mutex_lock(&dev_priv->wm.wm_mutex);
	crtc->wm.active.ilk = crtc_state->wm.ilk.optimal;
	ilk_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->wm.wm_mutex);
}
static inline void skl_wm_level_from_reg_val(u32 val,
					     struct skl_wm_level *level)
{
	level->plane_en = val & PLANE_WM_EN;
	level->ignore_lines = val & PLANE_WM_IGNORE_LINES;
	level->plane_res_b = val & PLANE_WM_BLOCKS_MASK;
	level->plane_res_l = (val >> PLANE_WM_LINES_SHIFT) &
		PLANE_WM_LINES_MASK;
}
void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
			      struct skl_pipe_wm *out)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	int level, max_level;
	enum plane_id plane_id;
	u32 val;

	max_level = ilk_wm_max_level(dev_priv);

	for_each_plane_id_on_crtc(crtc, plane_id) {
		struct skl_plane_wm *wm = &out->planes[plane_id];

		for (level = 0; level <= max_level; level++) {
			if (plane_id != PLANE_CURSOR)
				val = I915_READ(PLANE_WM(pipe, plane_id, level));
			else
				val = I915_READ(CUR_WM(pipe, level));

			skl_wm_level_from_reg_val(val, &wm->wm[level]);
		}

		if (plane_id != PLANE_CURSOR)
			val = I915_READ(PLANE_WM_TRANS(pipe, plane_id));
		else
			val = I915_READ(CUR_WM_TRANS(pipe));

		skl_wm_level_from_reg_val(val, &wm->trans_wm);
	}

	if (!crtc->active)
		return;

	out->linetime = I915_READ(PIPE_WM_LINETIME(pipe));
}
void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
	struct skl_ddb_values *hw = &dev_priv->wm.skl_hw;
	struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
	struct intel_crtc *crtc;
	struct intel_crtc_state *crtc_state;

	skl_ddb_get_hw_state(dev_priv, ddb);
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		crtc_state = to_intel_crtc_state(crtc->base.state);

		skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);

		if (crtc->active)
			hw->dirty_pipes |= BIT(crtc->pipe);
	}

	if (dev_priv->active_pipes) {
		/* Fully recompute DDB on first atomic commit */
		dev_priv->wm.distrust_bios_wm = true;
	}
}
static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct ilk_wm_values *hw = &dev_priv->wm.hw;
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
	struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal;
	enum pipe pipe = crtc->pipe;
	static const i915_reg_t wm0_pipe_reg[] = {
		[PIPE_A] = WM0_PIPEA_ILK,
		[PIPE_B] = WM0_PIPEB_ILK,
		[PIPE_C] = WM0_PIPEC_IVB,
	};

	hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));

	memset(active, 0, sizeof(*active));

	active->pipe_enabled = crtc->active;

	if (active->pipe_enabled) {
		u32 tmp = hw->wm_pipe[pipe];

		/*
		 * For active pipes LP0 watermark is marked as
		 * enabled, and LP1+ watermarks as disabled since
		 * we can't really reverse compute them in case
		 * multiple pipes are active.
		 */
		active->wm[0].enable = true;
		active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
		active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
		active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
		active->linetime = hw->wm_linetime[pipe];
	} else {
		int level, max_level = ilk_wm_max_level(dev_priv);

		/*
		 * For inactive pipes, all watermark levels
		 * should be marked as enabled but zeroed,
		 * which is what we'd compute them to.
		 */
		for (level = 0; level <= max_level; level++)
			active->wm[level].enable = true;
	}

	crtc->wm.active.ilk = *active;
}
#define _FW_WM(value, plane) \
	(((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
#define _FW_WM_VLV(value, plane) \
	(((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
			       struct g4x_wm_values *wm)
{
	u32 tmp;

	tmp = I915_READ(DSPFW1);
	wm->sr.plane = _FW_WM(tmp, SR);
	wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
	wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB);
	wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA);

	tmp = I915_READ(DSPFW2);
	wm->fbc_en = tmp & DSPFW_FBC_SR_EN;
	wm->sr.fbc = _FW_WM(tmp, FBC_SR);
	wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR);
	wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEB);
	wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
	wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA);

	tmp = I915_READ(DSPFW3);
	wm->hpll_en = tmp & DSPFW_HPLL_SR_EN;
	wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
	wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR);
	wm->hpll.plane = _FW_WM(tmp, HPLL_SR);
}
static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
			       struct vlv_wm_values *wm)
{
	enum pipe pipe;
	u32 tmp;

	for_each_pipe(dev_priv, pipe) {
		tmp = I915_READ(VLV_DDL(pipe));

		wm->ddl[pipe].plane[PLANE_PRIMARY] =
			(tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
		wm->ddl[pipe].plane[PLANE_CURSOR] =
			(tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
		wm->ddl[pipe].plane[PLANE_SPRITE0] =
			(tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
		wm->ddl[pipe].plane[PLANE_SPRITE1] =
			(tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
	}

	tmp = I915_READ(DSPFW1);
	wm->sr.plane = _FW_WM(tmp, SR);
	wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
	wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB);
	wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA);

	tmp = I915_READ(DSPFW2);
	wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB);
	wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
	wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA);

	tmp = I915_READ(DSPFW3);
	wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);

	if (IS_CHERRYVIEW(dev_priv)) {
		tmp = I915_READ(DSPFW7_CHV);
		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);

		tmp = I915_READ(DSPFW8_CHV);
		wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF);
		wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE);

		tmp = I915_READ(DSPFW9_CHV);
		wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC);
		wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC);

		tmp = I915_READ(DSPHOWM);
		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
		wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
		wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
		wm->pipe[PIPE_C].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEC_HI) << 8;
		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
		wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
	} else {
		tmp = I915_READ(DSPFW7);
		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);

		tmp = I915_READ(DSPHOWM);
		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
		wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
	}
}

#undef _FW_WM
#undef _FW_WM_VLV
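
/*
 * Illustrative example: if DSPFW1's PLANEA field reads 0xff and the
 * PLANEA_HI bit in DSPHOWM reads 1, the combined pipe A primary plane
 * watermark is (1 << 8) | 0xff = 0x1ff; DSPHOWM only supplies the
 * high-order bits that no longer fit in the legacy DSPFW fields.
 */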
void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
	struct g4x_wm_values *wm = &dev_priv->wm.g4x;
	struct intel_crtc *crtc;

	g4x_read_wm_values(dev_priv, wm);

	wm->cxsr = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct g4x_wm_state *active = &crtc->wm.active.g4x;
		struct g4x_pipe_wm *raw;
		enum pipe pipe = crtc->pipe;
		enum plane_id plane_id;
		int level, max_level;

		active->cxsr = wm->cxsr;
		active->hpll_en = wm->hpll_en;
		active->fbc_en = wm->fbc_en;

		active->sr = wm->sr;
		active->hpll = wm->hpll;

		for_each_plane_id_on_crtc(crtc, plane_id) {
			active->wm.plane[plane_id] =
				wm->pipe[pipe].plane[plane_id];
		}

		if (wm->cxsr && wm->hpll_en)
			max_level = G4X_WM_LEVEL_HPLL;
		else if (wm->cxsr)
			max_level = G4X_WM_LEVEL_SR;
		else
			max_level = G4X_WM_LEVEL_NORMAL;

		level = G4X_WM_LEVEL_NORMAL;
		raw = &crtc_state->wm.g4x.raw[level];
		for_each_plane_id_on_crtc(crtc, plane_id)
			raw->plane[plane_id] = active->wm.plane[plane_id];

		if (++level > max_level)
			goto out;

		raw = &crtc_state->wm.g4x.raw[level];
		raw->plane[PLANE_PRIMARY] = active->sr.plane;
		raw->plane[PLANE_CURSOR] = active->sr.cursor;
		raw->plane[PLANE_SPRITE0] = 0;
		raw->fbc = active->sr.fbc;

		if (++level > max_level)
			goto out;

		raw = &crtc_state->wm.g4x.raw[level];
		raw->plane[PLANE_PRIMARY] = active->hpll.plane;
		raw->plane[PLANE_CURSOR] = active->hpll.cursor;
		raw->plane[PLANE_SPRITE0] = 0;
		raw->fbc = active->hpll.fbc;

	out:
		for_each_plane_id_on_crtc(crtc, plane_id)
			g4x_raw_plane_wm_set(crtc_state, level,
					     plane_id, USHRT_MAX);
		g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);

		crtc_state->wm.g4x.optimal = *active;
		crtc_state->wm.g4x.intermediate = *active;

		drm_dbg_kms(&dev_priv->drm,
			    "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n",
			    pipe_name(pipe),
			    wm->pipe[pipe].plane[PLANE_PRIMARY],
			    wm->pipe[pipe].plane[PLANE_CURSOR],
			    wm->pipe[pipe].plane[PLANE_SPRITE0]);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n",
		    wm->sr.plane, wm->sr.cursor, wm->sr.fbc);
	drm_dbg_kms(&dev_priv->drm,
		    "Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n",
		    wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc);
	drm_dbg_kms(&dev_priv->drm, "Initial SR=%s HPLL=%s FBC=%s\n",
		    yesno(wm->cxsr), yesno(wm->hpll_en), yesno(wm->fbc_en));
}
void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;
	struct intel_crtc *crtc;

	mutex_lock(&dev_priv->wm.wm_mutex);

	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_crtc *crtc =
			intel_get_crtc_for_pipe(dev_priv, plane->pipe);
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);
		struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
		enum plane_id plane_id = plane->id;
		int level;

		if (plane_state->uapi.visible)
			continue;

		for (level = 0; level < 3; level++) {
			struct g4x_pipe_wm *raw =
				&crtc_state->wm.g4x.raw[level];

			raw->plane[plane_id] = 0;
			wm_state->wm.plane[plane_id] = 0;
		}

		if (plane_id == PLANE_PRIMARY) {
			for (level = 0; level < 3; level++) {
				struct g4x_pipe_wm *raw =
					&crtc_state->wm.g4x.raw[level];
				raw->fbc = 0;
			}

			wm_state->sr.fbc = 0;
			wm_state->hpll.fbc = 0;
			wm_state->fbc_en = false;
		}
	}

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		crtc_state->wm.g4x.intermediate =
			crtc_state->wm.g4x.optimal;
		crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
	}

	g4x_program_watermarks(dev_priv);

	mutex_unlock(&dev_priv->wm.wm_mutex);
}
void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
	struct vlv_wm_values *wm = &dev_priv->wm.vlv;
	struct intel_crtc *crtc;
	u32 val;

	vlv_read_wm_values(dev_priv, wm);

	wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
	wm->level = VLV_WM_LEVEL_PM2;

	if (IS_CHERRYVIEW(dev_priv)) {
		vlv_punit_get(dev_priv);

		val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
		if (val & DSP_MAXFIFO_PM5_ENABLE)
			wm->level = VLV_WM_LEVEL_PM5;

		/*
		 * If DDR DVFS is disabled in the BIOS, Punit
		 * will never ack the request. So if that happens
		 * assume we don't have to enable/disable DDR DVFS
		 * dynamically. To test that just set the REQ_ACK
		 * bit to poke the Punit, but don't change the
		 * HIGH/LOW bits so that we don't actually change
		 * the current state.
		 */
		val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
		val |= FORCE_DDR_FREQ_REQ_ACK;
		vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

		if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
			      FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
			drm_dbg_kms(&dev_priv->drm,
				    "Punit not acking DDR DVFS request, "
				    "assuming DDR DVFS is disabled\n");
			dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
		} else {
			val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
			if ((val & FORCE_DDR_HIGH_FREQ) == 0)
				wm->level = VLV_WM_LEVEL_DDR_DVFS;
		}

		vlv_punit_put(dev_priv);
	}

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct vlv_wm_state *active = &crtc->wm.active.vlv;
		const struct vlv_fifo_state *fifo_state =
			&crtc_state->wm.vlv.fifo_state;
		enum pipe pipe = crtc->pipe;
		enum plane_id plane_id;
		int level;

		vlv_get_fifo_size(crtc_state);

		active->num_levels = wm->level + 1;
		active->cxsr = wm->cxsr;

		for (level = 0; level < active->num_levels; level++) {
			struct g4x_pipe_wm *raw =
				&crtc_state->wm.vlv.raw[level];

			active->sr[level].plane = wm->sr.plane;
			active->sr[level].cursor = wm->sr.cursor;

			for_each_plane_id_on_crtc(crtc, plane_id) {
				active->wm[level].plane[plane_id] =
					wm->pipe[pipe].plane[plane_id];

				raw->plane[plane_id] =
					vlv_invert_wm_value(active->wm[level].plane[plane_id],
							    fifo_state->plane[plane_id]);
			}
		}

		for_each_plane_id_on_crtc(crtc, plane_id)
			vlv_raw_plane_wm_set(crtc_state, level,
					     plane_id, USHRT_MAX);
		vlv_invalidate_wms(crtc, active, level);

		crtc_state->wm.vlv.optimal = *active;
		crtc_state->wm.vlv.intermediate = *active;

		drm_dbg_kms(&dev_priv->drm,
			    "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
			    pipe_name(pipe),
			    wm->pipe[pipe].plane[PLANE_PRIMARY],
			    wm->pipe[pipe].plane[PLANE_CURSOR],
			    wm->pipe[pipe].plane[PLANE_SPRITE0],
			    wm->pipe[pipe].plane[PLANE_SPRITE1]);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
		    wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
}
void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;
	struct intel_crtc *crtc;

	mutex_lock(&dev_priv->wm.wm_mutex);

	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_crtc *crtc =
			intel_get_crtc_for_pipe(dev_priv, plane->pipe);
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);
		struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
		const struct vlv_fifo_state *fifo_state =
			&crtc_state->wm.vlv.fifo_state;
		enum plane_id plane_id = plane->id;
		int level;

		if (plane_state->uapi.visible)
			continue;

		for (level = 0; level < wm_state->num_levels; level++) {
			struct g4x_pipe_wm *raw =
				&crtc_state->wm.vlv.raw[level];

			raw->plane[plane_id] = 0;

			wm_state->wm[level].plane[plane_id] =
				vlv_invert_wm_value(raw->plane[plane_id],
						    fifo_state->plane[plane_id]);
		}
	}

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		crtc_state->wm.vlv.intermediate =
			crtc_state->wm.vlv.optimal;
		crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
	}

	vlv_program_watermarks(dev_priv);

	mutex_unlock(&dev_priv->wm.wm_mutex);
}
/*
 * FIXME should probably kill this and improve
 * the real watermark readout/sanitation instead
 */
static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
{
	I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
	I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
	I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);

	/*
	 * Don't touch WM1S_LP_EN here.
	 * Doing so could cause underruns.
	 */
}
void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
	struct ilk_wm_values *hw = &dev_priv->wm.hw;
	struct intel_crtc *crtc;

	ilk_init_lp_watermarks(dev_priv);

	for_each_intel_crtc(&dev_priv->drm, crtc)
		ilk_pipe_wm_get_hw_state(crtc);

	hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
	hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
	hw->wm_lp[2] = I915_READ(WM3_LP_ILK);

	hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
	if (INTEL_GEN(dev_priv) >= 7) {
		hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
		hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
	else if (IS_IVYBRIDGE(dev_priv))
		hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;

	hw->enable_fbc_wm =
		!(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
}
/**
 * intel_update_watermarks - update FIFO watermark values based on current modes
 * @crtc: the #intel_crtc on which to compute the WM
 *
 * Calculate watermark values for the various WM regs based on current mode
 * and plane configuration.
 *
 * There are several cases to deal with here:
 *   - normal (i.e. non-self-refresh)
 *   - self-refresh (SR) mode
 *   - lines are large relative to FIFO size (buffer can hold up to 2)
 *   - lines are small relative to FIFO size (buffer can hold more than 2
 *     lines), so need to account for TLB latency
 *
 *   The normal calculation is:
 *     watermark = dotclock * bytes per pixel * latency
 *   where latency is platform & configuration dependent (we assume pessimal
 *   values).
 *
 *   The SR calculation is:
 *     watermark = (trunc(latency/line time)+1) * surface width *
 *       bytes per pixel
 *   where
 *     line time = htotal / dotclock
 *     surface width = hdisplay for normal plane and 64 for cursor
 *   and latency is assumed to be high, as above.
 *
 * The final value programmed to the register should always be rounded up,
 * and include an extra 2 entries to account for clock crossings.
 *
 * We don't use the sprite, so we can ignore that. And on Crestline we have
 * to set the non-SR watermarks to 8.
 */
void intel_update_watermarks(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (dev_priv->display.update_wm)
		dev_priv->display.update_wm(crtc);
}
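
/*
 * Worked example of the SR formula above (made-up numbers, purely
 * illustrative): with a 100 MHz dotclock, htotal = 1100 and 20 usec
 * latency, line time = htotal / dotclock = 11 usec, so a 1024 pixel
 * wide, 4 bytes-per-pixel plane needs (trunc(20 / 11) + 1) * 1024 * 4
 * = 8192 bytes of FIFO, before the rounding up and the extra 2 entries
 * for clock crossings are applied.
 */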
void intel_enable_ipc(struct drm_i915_private *dev_priv)
{
	u32 val;

	if (!HAS_IPC(dev_priv))
		return;

	val = I915_READ(DISP_ARB_CTL2);

	if (dev_priv->ipc_enabled)
		val |= DISP_IPC_ENABLE;
	else
		val &= ~DISP_IPC_ENABLE;

	I915_WRITE(DISP_ARB_CTL2, val);
}

static bool intel_can_enable_ipc(struct drm_i915_private *dev_priv)
{
	/* Display WA #0477 WaDisableIPC: skl */
	if (IS_SKYLAKE(dev_priv))
		return false;

	/* Display WA #1141: SKL:all KBL:all CFL */
	if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
		return dev_priv->dram_info.symmetric_memory;

	return true;
}

void intel_init_ipc(struct drm_i915_private *dev_priv)
{
	if (!HAS_IPC(dev_priv))
		return;

	dev_priv->ipc_enabled = intel_can_enable_ipc(dev_priv);

	intel_enable_ipc(dev_priv);
}

static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}

static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);

		I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe)));
		POSTING_READ(DSPSURF(pipe));
	}
}
static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	/*
	 * Required for FBC
	 * WaFbcDisableDpfcClockGating:ilk
	 */
	dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE;

	I915_WRITE(PCH_3DCGDIS0,
		   MARIUNIT_CLOCK_GATE_DISABLE |
		   SVSMUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(PCH_3DCGDIS1,
		   VFMUNIT_CLOCK_GATE_DISABLE);

	/*
	 * According to the spec the following bits should be set in
	 * order to enable memory self-refresh
	 * The bit 22/21 of 0x42004
	 * The bit 5 of 0x42020
	 * The bit 15 of 0x45000
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
	dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
	I915_WRITE(DISP_ARB_CTL,
		   (I915_READ(DISP_ARB_CTL) |
		    DISP_FBC_WM_DIS));

	/*
	 * Based on the document from hardware guys the following bits
	 * should be set unconditionally in order to enable FBC.
	 * The bit 22 of 0x42000
	 * The bit 22 of 0x42004
	 * The bit 7,8,9 of 0x42020.
	 */
	if (IS_IRONLAKE_M(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ilk */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
		I915_WRITE(ILK_DISPLAY_CHICKEN2,
			   I915_READ(ILK_DISPLAY_CHICKEN2) |
			   ILK_DPARB_GATE);
	}

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);
	I915_WRITE(_3D_CHICKEN2,
		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
		   _3D_CHICKEN2_WM_READ_PIPELINED);

	/* WaDisableRenderCachePipelinedFlush:ilk */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:ilk */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev_priv);

	ibx_init_clock_gating(dev_priv);
}
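
/*
 * Note on the _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() helpers used
 * above and below: they target "masked" registers, where the upper 16
 * bits select which of the lower 16 bits the write actually updates.
 * For example (illustrative only), _MASKED_BIT_ENABLE(BIT(5)) produces
 * (BIT(5) << 16) | BIT(5), setting bit 5 without a read-modify-write
 * cycle, while _MASKED_BIT_DISABLE(BIT(5)) produces BIT(5) << 16,
 * clearing it.
 */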
static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;
	u32 val;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
		   PCH_DPLUNIT_CLOCK_GATE_DISABLE |
		   PCH_CPUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
		   DPLS_EDP_PPS_FIX_DIS);
	/* The below fixes the weird display corruption, a few pixels shifted
	 * downward, on (only) LVDS of some HP laptops with IVY.
	 */
	for_each_pipe(dev_priv, pipe) {
		val = I915_READ(TRANS_CHICKEN2(pipe));
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		if (dev_priv->vbt.fdi_rx_polarity_inverted)
			val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
		I915_WRITE(TRANS_CHICKEN2(pipe), val);
	}
	/* WADP0ClockGatingDisable */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(TRANS_CHICKEN1(pipe),
			   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
	}
}

static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = I915_READ(MCH_SSKPD);
	if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
		drm_dbg_kms(&dev_priv->drm,
			    "Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
			    tmp);
}
static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
	I915_WRITE(_3D_CHICKEN,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));

	/* WaDisable_RenderCache_OperationalFlush:snb */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN6_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));

	I915_WRITE(GEN6_UCGCTL1,
		   I915_READ(GEN6_UCGCTL1) |
		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
	 * gating disable must be set. Failure to set it results in
	 * flickering pixels due to Z write ordering failures after
	 * some amount of runtime in the Mesa "fire" demo, and Unigine
	 * Sanctuary and Tropics, and apparently anything else with
	 * alpha test or pixel discard.
	 *
	 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we didn't debug actual testcases to find it out.
	 *
	 * WaDisableRCCUnitClockGating:snb
	 * WaDisableRCPBUnitClockGating:snb
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/* WaStripsFansDisableFastClipPerformanceFix:snb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));

	/*
	 * Bspec says:
	 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
	 * 3DSTATE_SF number of SF output attributes is more than 16."
	 */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));

	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * The bit21 and bit22 of 0x42000
	 * The bit21 and bit22 of 0x42004
	 * The bit5 and bit7 of 0x42020
	 * The bit14 of 0x70180
	 * The bit14 of 0x71180
	 *
	 * WaFbcAsynchFlipDisableFbcQueue:snb
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE_D,
		   I915_READ(ILK_DSPCLK_GATE_D) |
		   ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);

	g4x_disable_trickle_feed(dev_priv);

	cpt_init_clock_gating(dev_priv);

	gen6_check_mch_setup(dev_priv);
}
static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
{
	u32 reg = I915_READ(GEN7_FF_THREAD_MODE);

	/*
	 * WaVSThreadDispatchOverride:ivb,vlv
	 *
	 * This actually overrides the dispatch
	 * mode for all thread types.
	 */
	reg &= ~GEN7_FF_SCHED_MASK;
	reg |= GEN7_FF_TS_SCHED_HW;
	reg |= GEN7_FF_VS_SCHED_HW;
	reg |= GEN7_FF_DS_SCHED_HW;

	I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}

static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/*
	 * TODO: this bit should only be enabled when really needed, then
	 * disabled when not needed anymore in order to save power.
	 */
	if (HAS_PCH_LPT_LP(dev_priv))
		I915_WRITE(SOUTH_DSPCLK_GATE_D,
			   I915_READ(SOUTH_DSPCLK_GATE_D) |
			   PCH_LP_PARTITION_LEVEL_DISABLE);

	/* WADPOClockGatingDisable:hsw */
	I915_WRITE(TRANS_CHICKEN1(PIPE_A),
		   I915_READ(TRANS_CHICKEN1(PIPE_A)) |
		   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}

static void lpt_suspend_hw(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_LPT_LP(dev_priv)) {
		u32 val = I915_READ(SOUTH_DSPCLK_GATE_D);

		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}
}
static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
				   int general_prio_credits,
				   int high_prio_credits)
{
	u32 misccpctl;
	u32 val;

	/* WaTempDisableDOPClkGating:bdw */
	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);

	val = I915_READ(GEN8_L3SQCREG1);
	val &= ~L3_PRIO_CREDITS_MASK;
	val |= L3_GENERAL_PRIO_CREDITS(general_prio_credits);
	val |= L3_HIGH_PRIO_CREDITS(high_prio_credits);
	I915_WRITE(GEN8_L3SQCREG1, val);

	/*
	 * Wait at least 100 clocks before re-enabling clock gating.
	 * See the definition of L3SQCREG1 in BSpec.
	 */
	POSTING_READ(GEN8_L3SQCREG1);
	udelay(1);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
}
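
/*
 * Callers pass platform specific credit splits; e.g. further below,
 * bdw requests gen8_set_l3sqc_credits(dev_priv, 30, 2) and chv
 * requests 38 general / 2 high priority credits, per the LSQC setting
 * recommendations.
 */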
static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* This is not a Wa. Enable to reduce Sampler power */
	I915_WRITE(GEN10_DFR_RATIO_EN_AND_CHICKEN,
		   I915_READ(GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE);

	/* WaEnable32PlaneMode:icl */
	I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
		   _MASKED_BIT_ENABLE(GEN11_ENABLE_32_PLANE_MODE));

	/*
	 * Wa_1408615072:icl,ehl  (vsunit)
	 * Wa_1407596294:icl,ehl  (hsunit)
	 */
	intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE,
			 0, VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);

	/* Wa_1407352427:icl,ehl */
	intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE2,
			 0, PSDUNIT_CLKGATE_DIS);
}
static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 vd_pg_enable = 0;
	unsigned int i;

	/* Wa_1408615072:tgl */
	intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE2,
			 0, VSUNIT_CLKGATE_DIS_TGL);

	/* This is not a WA. Enable VD HCP & MFX_ENC powergate */
	for (i = 0; i < I915_MAX_VCS; i++) {
		if (HAS_ENGINE(dev_priv, _VCS(i)))
			vd_pg_enable |= VDN_HCP_POWERGATE_ENABLE(i) |
					VDN_MFX_POWERGATE_ENABLE(i);
	}

	I915_WRITE(POWERGATE_ENABLE,
		   I915_READ(POWERGATE_ENABLE) | vd_pg_enable);
}

static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
{
	if (!HAS_PCH_CNP(dev_priv))
		return;

	/* Display WA #1181 WaSouthDisplayDisablePWMCGEGating: cnp */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, I915_READ(SOUTH_DSPCLK_GATE_D) |
		   CNP_PWM_CGE_GATING_DISABLE);
}
static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 val;

	cnp_init_clock_gating(dev_priv);

	/* This is not a Wa. Enable for better image quality */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));

	/* WaEnableChickenDCPR:cnl */
	I915_WRITE(GEN8_CHICKEN_DCPR_1,
		   I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);

	/* WaFbcWakeMemOn:cnl */
	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
		   DISP_FBC_MEMORY_WAKE);

	val = I915_READ(SLICE_UNIT_LEVEL_CLKGATE);
	/* ReadHitWriteOnlyDisable:cnl */
	val |= RCCUNIT_CLKGATE_DIS;
	/* WaSarbUnitClockGatingDisable:cnl (pre-prod) */
	if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_B0))
		val |= SARBUNIT_CLKGATE_DIS;
	I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE, val);

	/* Wa_2201832410:cnl */
	val = I915_READ(SUBSLICE_UNIT_LEVEL_CLKGATE);
	val |= GWUNIT_CLKGATE_DIS;
	I915_WRITE(SUBSLICE_UNIT_LEVEL_CLKGATE, val);

	/* WaDisableVFclkgate:cnl */
	/* WaVFUnitClockGatingDisable:cnl */
	val = I915_READ(UNSLICE_UNIT_LEVEL_CLKGATE);
	val |= VFUNIT_CLKGATE_DIS;
	I915_WRITE(UNSLICE_UNIT_LEVEL_CLKGATE, val);
}
static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	cnp_init_clock_gating(dev_priv);
	gen9_init_clock_gating(dev_priv);

	/* WaFbcNukeOnHostModify:cfl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}

static void kbl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* WaDisableSDEUnitClockGating:kbl */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
		I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
			   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableGamClockGating:kbl */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
		I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
			   GEN6_GAMUNIT_CLOCK_GATE_DISABLE);

	/* WaFbcNukeOnHostModify:kbl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}

static void skl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* WAC6entrylatency:skl */
	I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
		   FBC_LLC_FULLY_OPEN);

	/* WaFbcNukeOnHostModify:skl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}
static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	/* WaSwitchSolVfFArbitrationPriority:bdw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaPsrDPAMaskVBlankInSRD:bdw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);

	/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(CHICKEN_PIPESL_1(pipe),
			   I915_READ(CHICKEN_PIPESL_1(pipe)) |
			   BDW_DPRS_MASK_VBLANK_SRD);
	}

	/* WaVSRefCountFullforceMissDisable:bdw */
	/* WaDSRefCountFullforceMissDisable:bdw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableSDEUnitClockGating:bdw */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* WaProgramL3SqcReg1Default:bdw */
	gen8_set_l3sqc_credits(dev_priv, 30, 2);

	/* WaKVMNotificationOnConfigChange:bdw */
	I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1)
		   | KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);

	lpt_init_clock_gating(dev_priv);

	/* WaDisableDopClockGating:bdw
	 *
	 * Also see the CHICKEN2 write in bdw_init_workarounds() to disable DOP
	 * clock gating.
	 */
	I915_WRITE(GEN6_UCGCTL1,
		   I915_READ(GEN6_UCGCTL1) | GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
}
static void hsw_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* L3 caching of data atomics doesn't work -- disable it. */
	I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
	I915_WRITE(HSW_ROW_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:hsw */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	/* WaVSRefCountFullforceMissDisable:hsw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);

	/* WaDisable_RenderCache_OperationalFlush:hsw */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* enable HiZ Raw Stall Optimization */
	I915_WRITE(CACHE_MODE_0_GEN7,
		   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));

	/* WaDisable4x2SubspanOptimization:hsw */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	/* WaSampleCChickenBitEnable:hsw */
	I915_WRITE(HALF_SLICE_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE));

	/* WaSwitchSolVfFArbitrationPriority:hsw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	lpt_init_clock_gating(dev_priv);
}
static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 snpcr;

	I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableEarlyCull:ivb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:ivb */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaDisablePSDDualDispatchEnable:ivb */
	if (IS_IVB_GT1(dev_priv))
		I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
			   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:ivb */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode:ivb */
	I915_WRITE(GEN7_L3CNTLREG1,
		   GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
		   GEN7_WA_L3_CHICKEN_MODE);
	if (IS_IVB_GT1(dev_priv))
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	else {
		/* must write both registers */
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
		I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	}

	/* WaForceL3Serialization:ivb */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* This is required by WaCatErrorRejectionIssue:ivb */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	g4x_disable_trickle_feed(dev_priv);

	gen7_setup_fixed_func_scheduler(dev_priv);

	if (0) { /* causes HiZ corruption on ivb:gt1 */
		/* enable HiZ Raw Stall Optimization */
		I915_WRITE(CACHE_MODE_0_GEN7,
			   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
	}

	/* WaDisable4x2SubspanOptimization:ivb */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= GEN6_MBC_SNPCR_MED;
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	if (!HAS_PCH_NOP(dev_priv))
		cpt_init_clock_gating(dev_priv);

	gen6_check_mch_setup(dev_priv);
}
static void vlv_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* WaDisableEarlyCull:vlv */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:vlv */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaPsdDispatchEnable:vlv */
	/* WaDisablePSDDualDispatchEnable:vlv */
	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
				      GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:vlv */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* WaForceL3Serialization:vlv */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/* WaDisableDopClockGating:vlv */
	I915_WRITE(GEN7_ROW_CHICKEN2,
		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:vlv */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	gen7_setup_fixed_func_scheduler(dev_priv);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableL3Bank2xClockGate:vlv
	 * Disabling L3 clock gating- MMIO 940c[25] = 1
	 * Set bit 25, to disable L3_BANK_2x_CLK_GATING */
	I915_WRITE(GEN7_UCGCTL4,
		   I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);

	/*
	 * BSpec says this must be set, even though
	 * WaDisable4x2SubspanOptimization isn't listed for VLV.
	 */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	/*
	 * WaIncreaseL3CreditsForVLVB0:vlv
	 * This is the hardware default actually.
	 */
	I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);

	/*
	 * WaDisableVLVClockGating_VBIIssue:vlv
	 * Disable clock gating on the GCFG unit to prevent a delay
	 * in the reporting of vblank events.
	 */
	I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}
static void chv_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* WaVSRefCountFullforceMissDisable:chv */
	/* WaDSRefCountFullforceMissDisable:chv */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	/* WaDisableSemaphoreAndSyncFlipWait:chv */
	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableCSUnitClockGating:chv */
	I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableSDEUnitClockGating:chv */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * WaProgramL3SqcReg1Default:chv
	 * See gfxspecs/Related Documents/Performance Guide/
	 * LSQC Setting Recommendations.
	 */
	gen8_set_l3sqc_credits(dev_priv, 38, 2);
}

static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 dspclk_gate;

	I915_WRITE(RENCLK_GATE_D1, 0);
	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
		   GS_UNIT_CLOCK_GATE_DISABLE |
		   CL_UNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(RAMCLK_GATE_D, 0);
	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
		OVRUNIT_CLOCK_GATE_DISABLE |
		OVCUNIT_CLOCK_GATE_DISABLE;
	if (IS_GM45(dev_priv))
		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);

	/* WaDisableRenderCachePipelinedFlush */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:g4x */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev_priv);
}
static void i965gm_init_clock_gating(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	intel_uncore_write(uncore, RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	intel_uncore_write(uncore, RENCLK_GATE_D2, 0);
	intel_uncore_write(uncore, DSPCLK_GATE_D, 0);
	intel_uncore_write(uncore, RAMCLK_GATE_D, 0);
	intel_uncore_write16(uncore, DEUC, 0);
	intel_uncore_write(uncore,
			   MI_ARB_STATE,
			   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	intel_uncore_write(uncore,
			   CACHE_MODE_0,
			   _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}

static void i965g_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}

static void gen3_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 dstate = I915_READ(D_STATE);

	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
		DSTATE_DOT_CLOCK_GATING;
	I915_WRITE(D_STATE, dstate);

	if (IS_PINEVIEW(dev_priv))
		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));

	/* IIR "flip pending" means done if this bit is set */
	I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));

	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}

static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
		   _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));

	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
}

static void i830_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
}

void intel_init_clock_gating(struct drm_i915_private *dev_priv)
{
	dev_priv->display.init_clock_gating(dev_priv);
}

void intel_suspend_hw(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_LPT(dev_priv))
		lpt_suspend_hw(dev_priv);
}

static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
{
	drm_dbg_kms(&dev_priv->drm,
		    "No clock gating settings or workarounds applied.\n");
}
/**
 * intel_init_clock_gating_hooks - setup the clock gating hooks
 * @dev_priv: device private
 *
 * Setup the hooks that configure which clocks of a given platform can be
 * gated and also apply various GT and display specific workarounds for these
 * platforms. Note that some GT specific workarounds are applied separately
 * when GPU contexts or batchbuffers start their execution.
 */
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
	if (IS_GEN(dev_priv, 12))
		dev_priv->display.init_clock_gating = tgl_init_clock_gating;
	else if (IS_GEN(dev_priv, 11))
		dev_priv->display.init_clock_gating = icl_init_clock_gating;
	else if (IS_CANNONLAKE(dev_priv))
		dev_priv->display.init_clock_gating = cnl_init_clock_gating;
	else if (IS_COFFEELAKE(dev_priv))
		dev_priv->display.init_clock_gating = cfl_init_clock_gating;
	else if (IS_SKYLAKE(dev_priv))
		dev_priv->display.init_clock_gating = skl_init_clock_gating;
	else if (IS_KABYLAKE(dev_priv))
		dev_priv->display.init_clock_gating = kbl_init_clock_gating;
	else if (IS_BROXTON(dev_priv))
		dev_priv->display.init_clock_gating = bxt_init_clock_gating;
	else if (IS_GEMINILAKE(dev_priv))
		dev_priv->display.init_clock_gating = glk_init_clock_gating;
	else if (IS_BROADWELL(dev_priv))
		dev_priv->display.init_clock_gating = bdw_init_clock_gating;
	else if (IS_CHERRYVIEW(dev_priv))
		dev_priv->display.init_clock_gating = chv_init_clock_gating;
	else if (IS_HASWELL(dev_priv))
		dev_priv->display.init_clock_gating = hsw_init_clock_gating;
	else if (IS_IVYBRIDGE(dev_priv))
		dev_priv->display.init_clock_gating = ivb_init_clock_gating;
	else if (IS_VALLEYVIEW(dev_priv))
		dev_priv->display.init_clock_gating = vlv_init_clock_gating;
	else if (IS_GEN(dev_priv, 6))
		dev_priv->display.init_clock_gating = gen6_init_clock_gating;
	else if (IS_GEN(dev_priv, 5))
		dev_priv->display.init_clock_gating = ilk_init_clock_gating;
	else if (IS_G4X(dev_priv))
		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
	else if (IS_I965GM(dev_priv))
		dev_priv->display.init_clock_gating = i965gm_init_clock_gating;
	else if (IS_I965G(dev_priv))
		dev_priv->display.init_clock_gating = i965g_init_clock_gating;
	else if (IS_GEN(dev_priv, 3))
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
	else if (IS_GEN(dev_priv, 2))
		dev_priv->display.init_clock_gating = i830_init_clock_gating;
	else {
		MISSING_CASE(INTEL_DEVID(dev_priv));
		dev_priv->display.init_clock_gating = nop_init_clock_gating;
	}
}
/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_i915_private *dev_priv)
{
	/* For cxsr */
	if (IS_PINEVIEW(dev_priv))
		pnv_get_mem_freq(dev_priv);
	else if (IS_GEN(dev_priv, 5))
		ilk_get_mem_freq(dev_priv);

	if (intel_has_sagv(dev_priv))
		skl_setup_sagv_block_time(dev_priv);

	/* For FIFO watermark updates */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_setup_wm_latency(dev_priv);
		dev_priv->display.initial_watermarks = skl_initial_wm;
		dev_priv->display.atomic_update_watermarks = skl_atomic_update_crtc_wm;
		dev_priv->display.compute_global_watermarks = skl_compute_wm;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_setup_wm_latency(dev_priv);

		if ((IS_GEN(dev_priv, 5) && dev_priv->wm.pri_latency[1] &&
		     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
		    (!IS_GEN(dev_priv, 5) && dev_priv->wm.pri_latency[0] &&
		     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
			dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
			dev_priv->display.compute_intermediate_wm =
				ilk_compute_intermediate_wm;
			dev_priv->display.initial_watermarks =
				ilk_initial_watermarks;
			dev_priv->display.optimize_watermarks =
				ilk_optimize_watermarks;
		} else {
			drm_dbg_kms(&dev_priv->drm,
				    "Failed to read display plane latency. "
				    "Disable CxSR\n");
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_setup_wm_latency(dev_priv);
		dev_priv->display.compute_pipe_wm = vlv_compute_pipe_wm;
		dev_priv->display.compute_intermediate_wm = vlv_compute_intermediate_wm;
		dev_priv->display.initial_watermarks = vlv_initial_watermarks;
		dev_priv->display.optimize_watermarks = vlv_optimize_watermarks;
		dev_priv->display.atomic_update_watermarks = vlv_atomic_update_fifo;
	} else if (IS_G4X(dev_priv)) {
		g4x_setup_wm_latency(dev_priv);
		dev_priv->display.compute_pipe_wm = g4x_compute_pipe_wm;
		dev_priv->display.compute_intermediate_wm = g4x_compute_intermediate_wm;
		dev_priv->display.initial_watermarks = g4x_initial_watermarks;
		dev_priv->display.optimize_watermarks = g4x_optimize_watermarks;
	} else if (IS_PINEVIEW(dev_priv)) {
		if (!intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
					    dev_priv->is_ddr3,
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			drm_info(&dev_priv->drm,
				 "failed to find known CxSR latency "
				 "(found ddr%s fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
			/* Disable CxSR and never update its watermark again */
			intel_set_memory_cxsr(dev_priv, false);
			dev_priv->display.update_wm = NULL;
		} else
			dev_priv->display.update_wm = pnv_update_wm;
	} else if (IS_GEN(dev_priv, 4)) {
		dev_priv->display.update_wm = i965_update_wm;
	} else if (IS_GEN(dev_priv, 3)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
	} else if (IS_GEN(dev_priv, 2)) {
		if (INTEL_NUM_PIPES(dev_priv) == 1) {
			dev_priv->display.update_wm = i845_update_wm;
			dev_priv->display.get_fifo_size = i845_get_fifo_size;
		} else {
			dev_priv->display.update_wm = i9xx_update_wm;
			dev_priv->display.get_fifo_size = i830_get_fifo_size;
		}
	} else {
		drm_err(&dev_priv->drm,
			"unexpected fall-through in %s\n", __func__);
	}
}
void intel_pm_setup(struct drm_i915_private *dev_priv)
{
	dev_priv->runtime_pm.suspended = false;
	atomic_set(&dev_priv->runtime_pm.wakeref_count, 0);
}