/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */
#include <linux/cpufreq.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
/**
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle and RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available in Intel GPUs, which differ in
 * the latency required to enter and leave RC6, and in the voltage consumed
 * by the GPU in the various states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter: RC6 is the normal RC6 state, RC6p is the deep RC6, and
 * RC6pp is the deepest RC6. Their support by hardware varies according to
 * the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
 * the one which brings the most power savings; deeper states save more
 * power, but require higher latency to switch to and wake up.
 */
#define INTEL_RC6_ENABLE		(1<<0)
#define INTEL_RC6p_ENABLE		(1<<1)
#define INTEL_RC6pp_ENABLE		(1<<2)
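/*
 * Illustrative note (not from the original source): the flags above combine
 * as a bitmask, so a hypothetical configuration that allows both plain RC6
 * and deep RC6 would be expressed as
 * (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE).
 */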
static void gen9_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* WaEnableLbsSlaRetryTimerDecrement:skl */
	I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
		   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
}
static void skl_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	gen9_init_clock_gating(dev);

	if (INTEL_REVID(dev) <= SKL_REVID_B0) {
		/*
		 * WaDisableSDEUnitClockGating:skl
		 * WaSetGAPSunitClckGateDisable:skl
		 */
		I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
			   GEN8_GAPSUNIT_CLOCK_GATE_DISABLE |
			   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

		/* WaDisableVFUnitClockGating:skl */
		I915_WRITE(GEN6_UCGCTL2, I915_READ(GEN6_UCGCTL2) |
			   GEN6_VFUNIT_CLOCK_GATE_DISABLE);
	}

	if (INTEL_REVID(dev) <= SKL_REVID_D0) {
		/* WaDisableHDCInvalidation:skl */
		I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
			   BDW_DISABLE_HDC_INVALIDATION);

		/* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
		I915_WRITE(FF_SLICE_CS_CHICKEN2,
			   _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
	}

	if (INTEL_REVID(dev) <= SKL_REVID_E0)
		/* WaDisableLSQCROPERFforOCL:skl */
		I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
			   GEN8_LQSC_RO_PERF_DIS);
}
static void bxt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	gen9_init_clock_gating(dev);

	/*
	 * FIXME:
	 * GEN8_SDEUNIT_CLOCK_GATE_DISABLE applies on A0 only.
	 * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
	 */
	/* WaDisableSDEUnitClockGating:bxt */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE |
		   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);

	/* FIXME: apply on A0 only */
	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
}
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->ips.r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->ips.c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->ips.c_m = 1;
	} else {
		dev_priv->ips.c_m = 2;
	}
}
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
							 int is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}
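/*
 * Illustrative usage (hypothetical values): a desktop, DDR3 system with an
 * 800 MHz FSB and 800 MHz memory, i.e.
 * intel_get_cxsr_latency(1, 1, 800, 800), matches the {1, 1, 800, 800, ...}
 * DDR3-800 row of cxsr_latency_table above; any unlisted combination falls
 * through to the NULL return and CxSR stays disabled.
 */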
static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
	if (enable)
		val &= ~FORCE_DDR_HIGH_FREQ;
	else
		val |= FORCE_DDR_HIGH_FREQ;
	val &= ~FORCE_DDR_LOW_FREQ;
	val |= FORCE_DDR_FREQ_REQ_ACK;
	vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
		      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
		DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");

	mutex_unlock(&dev_priv->rps.hw_lock);
}
static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	if (enable)
		val |= DSP_MAXFIFO_PM5_ENABLE;
	else
		val &= ~DSP_MAXFIFO_PM5_ENABLE;
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);

	mutex_unlock(&dev_priv->rps.hw_lock);
}
#define FW_WM(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	struct drm_device *dev = dev_priv->dev;
	u32 val;

	if (IS_VALLEYVIEW(dev)) {
		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
		if (IS_CHERRYVIEW(dev))
			chv_set_memory_pm5(dev_priv, enable);
	} else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
	} else if (IS_PINEVIEW(dev)) {
		val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
		val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
		I915_WRITE(DSPFW3, val);
	} else if (IS_I945G(dev) || IS_I945GM(dev)) {
		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
		I915_WRITE(FW_BLC_SELF, val);
	} else if (IS_I915GM(dev)) {
		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
		I915_WRITE(INSTPM, val);
	} else {
		return;
	}

	DRM_DEBUG_KMS("memory self-refresh is %s\n",
		      enable ? "enabled" : "disabled");
}
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value. It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int pessimal_latency_ns = 5000;
#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
	((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
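/*
 * Worked example (illustrative register values): the 9-bit FIFO start points
 * are split across two registers, with the low 8 bits in DSPARB/DSPARB3 and
 * the 9th bit in DSPARB2. With lo_shift == 8 and hi_shift == 4, dsparb ==
 * 0x4000 contributes bits 15:8 (0x40) and dsparb2 == 0x10 contributes bit 4
 * as bit 8 (0x100), giving a start of 0x140, i.e. 320.
 */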
static int vlv_get_fifo_size(struct drm_device *dev,
			     enum pipe pipe, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int sprite0_start, sprite1_start, size;

	switch (pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
		break;
	case PIPE_C:
		dsparb2 = I915_READ(DSPARB2);
		dsparb3 = I915_READ(DSPARB3);
		sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
		sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
		break;
	default:
		return 0;
	}

	switch (plane) {
	case 0:
		size = sprite0_start;
		break;
	case 1:
		size = sprite1_start - sprite0_start;
		break;
	case 2:
		size = 512 - 1 - sprite1_start;
		break;
	default:
		return 0;
	}

	DRM_DEBUG_KMS("Pipe %c %s %c FIFO size: %d\n",
		      pipe_name(pipe), plane == 0 ? "primary" : "sprite",
		      plane == 0 ? plane_name(pipe) : sprite_name(pipe, plane - 1),
		      size);

	return size;
}
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A",
		      size);

	return size;
}
/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pineview_display_hplloff_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pineview_cursor_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params g4x_wm_info = {
	.fifo_size = G4X_FIFO_SIZE,
	.max_wm = G4X_MAX_WM,
	.default_wm = G4X_MAX_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params g4x_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params valleyview_wm_info = {
	.fifo_size = VALLEYVIEW_FIFO_SIZE,
	.max_wm = VALLEYVIEW_MAX_WM,
	.default_wm = VALLEYVIEW_MAX_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params valleyview_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = VALLEYVIEW_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i965_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i945_wm_info = {
	.fifo_size = I945_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i915_wm_info = {
	.fifo_size = I915_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i830_a_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i830_bc_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM/2,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i845_wm_info = {
	.fifo_size = I830_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @pixel_size: display pixel size
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO-line-sized chunks from memory until the FIFO fills past
 * the watermark point. If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size,
					int pixel_size,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand.
	 */
	entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;

	/*
	 * Bspec seems to indicate that the value shouldn't be lower than
	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
	 * Let's go for 8, which is the burst size, since certain platforms
	 * already use a hardcoded 8 (which is what the spec says should be
	 * done).
	 */
	if (wm_size <= 8)
		wm_size = 8;

	return wm_size;
}
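/*
 * Worked example (illustrative numbers, not from the original source):
 * clock_in_khz = 148500 (1080p@60), pixel_size = 4, latency_ns = 5000,
 * fifo_size = 512, guard_size = 2, cacheline_size = 64:
 *   entries_required = (148500/1000) * 4 * 5000 / 1000 = 2960 bytes
 *                    = DIV_ROUND_UP(2960, 64) = 47 cachelines
 *   wm_size = 512 - (47 + 2) = 463, which is then clamped to max_wm.
 */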
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
	struct drm_crtc *crtc, *enabled = NULL;

	for_each_crtc(dev, crtc) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}
static void pineview_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		intel_set_memory_cxsr(dev_priv, false);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode;
		int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
		int clock;

		adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		clock = adjusted_mode->crtc_clock;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= FW_WM(wm, SR);
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= FW_WM(wm, CURSOR_SR);
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= FW_WM(wm, HPLL_SR);
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= FW_WM(wm, HPLL_CURSOR);
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		intel_set_memory_cxsr(dev_priv, true);
	} else {
		intel_set_memory_cxsr(dev_priv, false);
	}
}
static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int htotal, hdisplay, clock, pixel_size;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (!intel_crtc_active(crtc)) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
	pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * crtc->cursor->state->crtc_w * pixel_size;
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}
/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_device *dev,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}
static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int hdisplay, htotal, pixel_size, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
	pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;

	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * crtc->cursor->state->crtc_w;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}
#define FW_WM_VLV(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)
static void vlv_write_wm_values(struct intel_crtc *crtc,
				const struct vlv_wm_values *wm)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(VLV_DDL(pipe),
		   (wm->ddl[pipe].cursor << DDL_CURSOR_SHIFT) |
		   (wm->ddl[pipe].sprite[1] << DDL_SPRITE_SHIFT(1)) |
		   (wm->ddl[pipe].sprite[0] << DDL_SPRITE_SHIFT(0)) |
		   (wm->ddl[pipe].primary << DDL_PLANE_SHIFT));

	I915_WRITE(DSPFW1,
		   FW_WM(wm->sr.plane, SR) |
		   FW_WM(wm->pipe[PIPE_B].cursor, CURSORB) |
		   FW_WM_VLV(wm->pipe[PIPE_B].primary, PLANEB) |
		   FW_WM_VLV(wm->pipe[PIPE_A].primary, PLANEA));
	I915_WRITE(DSPFW2,
		   FW_WM_VLV(wm->pipe[PIPE_A].sprite[1], SPRITEB) |
		   FW_WM(wm->pipe[PIPE_A].cursor, CURSORA) |
		   FW_WM_VLV(wm->pipe[PIPE_A].sprite[0], SPRITEA));
	I915_WRITE(DSPFW3,
		   FW_WM(wm->sr.cursor, CURSOR_SR));

	if (IS_CHERRYVIEW(dev_priv)) {
		I915_WRITE(DSPFW7_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
		I915_WRITE(DSPFW8_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].sprite[1], SPRITEF) |
			   FW_WM_VLV(wm->pipe[PIPE_C].sprite[0], SPRITEE));
		I915_WRITE(DSPFW9_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].primary, PLANEC) |
			   FW_WM(wm->pipe[PIPE_C].cursor, CURSORC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_C].sprite[1] >> 8, SPRITEF_HI) |
			   FW_WM(wm->pipe[PIPE_C].sprite[0] >> 8, SPRITEE_HI) |
			   FW_WM(wm->pipe[PIPE_C].primary >> 8, PLANEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
	} else {
		I915_WRITE(DSPFW7,
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
	}

	POSTING_READ(DSPFW1);

	dev_priv->wm.vlv = *wm;
}

#undef FW_WM_VLV
static uint8_t vlv_compute_drain_latency(struct drm_crtc *crtc,
					 struct drm_plane *plane)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int entries, prec_mult, drain_latency, pixel_size;
	int clock = intel_crtc->config->base.adjusted_mode.crtc_clock;
	const int high_precision = IS_CHERRYVIEW(dev) ? 16 : 64;

	/*
	 * FIXME the plane might have an fb
	 * but be invisible (eg. due to clipping)
	 */
	if (!intel_crtc->active || !plane->state->fb)
		return 0;

	if (WARN(clock == 0, "Pixel clock is zero!\n"))
		return 0;

	pixel_size = drm_format_plane_cpp(plane->state->fb->pixel_format, 0);

	if (WARN(pixel_size == 0, "Pixel size is zero!\n"))
		return 0;

	entries = DIV_ROUND_UP(clock, 1000) * pixel_size;

	prec_mult = high_precision;
	drain_latency = 64 * prec_mult * 4 / entries;

	if (drain_latency > DRAIN_LATENCY_MASK) {
		prec_mult /= 2;
		drain_latency = 64 * prec_mult * 4 / entries;
	}

	if (drain_latency > DRAIN_LATENCY_MASK)
		drain_latency = DRAIN_LATENCY_MASK;

	return drain_latency | (prec_mult == high_precision ?
				DDL_PRECISION_HIGH : DDL_PRECISION_LOW);
}
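/*
 * Worked example (illustrative numbers): clock = 148500 kHz and
 * pixel_size = 4 give entries = DIV_ROUND_UP(148500, 1000) * 4 = 596, so
 * with the CHV high-precision multiplier of 16,
 * drain_latency = 64 * 16 * 4 / 596 = 6, which fits in DRAIN_LATENCY_MASK
 * and is returned with DDL_PRECISION_HIGH set.
 */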
static int vlv_compute_wm(struct intel_crtc *crtc,
			  struct intel_plane *plane,
			  int fifo_size)
{
	int clock, entries, pixel_size;

	/*
	 * FIXME the plane might have an fb
	 * but be invisible (eg. due to clipping)
	 */
	if (!crtc->active || !plane->base.state->fb)
		return 0;

	pixel_size = drm_format_plane_cpp(plane->base.state->fb->pixel_format, 0);
	clock = crtc->config->base.adjusted_mode.crtc_clock;

	entries = DIV_ROUND_UP(clock, 1000) * pixel_size;

	/*
	 * Set up the watermark such that we don't start issuing memory
	 * requests until we are within PND's max deadline value (256us).
	 * The idea is to be idle as long as possible while still taking
	 * advantage of PND's deadline scheduling. The limit of 8
	 * cachelines (used when the FIFO will anyway drain in less time
	 * than 256us) should match what we would get if trickle
	 * feed were enabled.
	 */
	return fifo_size - clamp(DIV_ROUND_UP(256 * entries, 64), 0, fifo_size - 8);
}
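/*
 * Worked example (illustrative numbers): with the same 596 bytes/us drain
 * rate as above and fifo_size = 512, DIV_ROUND_UP(256 * 596, 64) = 2384
 * exceeds fifo_size - 8, so the clamp leaves 504 and the watermark becomes
 * 512 - 504 = 8 cachelines, i.e. the trickle-feed-equivalent floor
 * described in the comment above.
 */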
static bool vlv_compute_sr_wm(struct drm_device *dev,
			      struct vlv_wm_values *wm)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc;
	enum pipe pipe = INVALID_PIPE;
	int num_planes = 0;
	int fifo_size = 0;
	struct intel_plane *plane;

	wm->sr.cursor = wm->sr.plane = 0;

	crtc = single_enabled_crtc(dev);
	/* maxfifo not supported on pipe C */
	if (crtc && to_intel_crtc(crtc)->pipe != PIPE_C) {
		pipe = to_intel_crtc(crtc)->pipe;
		num_planes = !!wm->pipe[pipe].primary +
			!!wm->pipe[pipe].sprite[0] +
			!!wm->pipe[pipe].sprite[1];
		fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1;
	}

	if (fifo_size == 0 || num_planes > 1)
		return false;

	wm->sr.cursor = vlv_compute_wm(to_intel_crtc(crtc),
				       to_intel_plane(crtc->cursor), 0x3f);

	list_for_each_entry(plane, &dev->mode_config.plane_list, base.head) {
		if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (plane->pipe != pipe)
			continue;

		wm->sr.plane = vlv_compute_wm(to_intel_crtc(crtc),
					      plane, fifo_size);
		if (wm->sr.plane != 0)
			break;
	}

	return true;
}
static void valleyview_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	bool cxsr_enabled;
	struct vlv_wm_values wm = dev_priv->wm.vlv;

	wm.ddl[pipe].primary = vlv_compute_drain_latency(crtc, crtc->primary);
	wm.pipe[pipe].primary = vlv_compute_wm(intel_crtc,
					       to_intel_plane(crtc->primary),
					       vlv_get_fifo_size(dev, pipe, 0));

	wm.ddl[pipe].cursor = vlv_compute_drain_latency(crtc, crtc->cursor);
	wm.pipe[pipe].cursor = vlv_compute_wm(intel_crtc,
					      to_intel_plane(crtc->cursor),
					      0x3f);

	cxsr_enabled = vlv_compute_sr_wm(dev, &wm);

	if (memcmp(&wm, &dev_priv->wm.vlv, sizeof(wm)) == 0)
		return;

	DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
		      "SR: plane=%d, cursor=%d\n", pipe_name(pipe),
		      wm.pipe[pipe].primary, wm.pipe[pipe].cursor,
		      wm.sr.plane, wm.sr.cursor);

	/*
	 * FIXME DDR DVFS introduces massive memory latencies which
	 * are not known to the system agent, so any deadline specified
	 * by the display may not be respected. To support DDR DVFS
	 * the watermark code needs to be rewritten to essentially
	 * bypass the deadline mechanism and rely solely on the
	 * watermarks. For now disable DDR DVFS.
	 */
	if (IS_CHERRYVIEW(dev_priv))
		chv_set_memory_dvfs(dev_priv, false);

	if (!cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, false);

	vlv_write_wm_values(intel_crtc, &wm);

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void valleyview_update_sprite_wm(struct drm_plane *plane,
					struct drm_crtc *crtc,
					uint32_t sprite_width,
					uint32_t sprite_height,
					int pixel_size,
					bool enabled, bool scaled)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	int sprite = to_intel_plane(plane)->plane;
	bool cxsr_enabled;
	struct vlv_wm_values wm = dev_priv->wm.vlv;

	if (enabled) {
		wm.ddl[pipe].sprite[sprite] =
			vlv_compute_drain_latency(crtc, plane);

		wm.pipe[pipe].sprite[sprite] =
			vlv_compute_wm(intel_crtc,
				       to_intel_plane(plane),
				       vlv_get_fifo_size(dev, pipe, sprite+1));
	} else {
		wm.ddl[pipe].sprite[sprite] = 0;
		wm.pipe[pipe].sprite[sprite] = 0;
	}

	cxsr_enabled = vlv_compute_sr_wm(dev, &wm);

	if (memcmp(&wm, &dev_priv->wm.vlv, sizeof(wm)) == 0)
		return;

	DRM_DEBUG_KMS("Setting FIFO watermarks - %c: sprite %c=%d, "
		      "SR: plane=%d, cursor=%d\n", pipe_name(pipe),
		      sprite_name(pipe, sprite),
		      wm.pipe[pipe].sprite[sprite],
		      wm.sr.plane, wm.sr.cursor);

	if (!cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, false);

	vlv_write_wm_values(intel_crtc, &wm);

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
#define single_plane_enabled(mask) is_power_of_2(mask)
static void g4x_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	if (g4x_compute_wm0(dev, PIPE_A,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
		      "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   FW_WM(plane_sr, SR) |
		   FW_WM(cursorb_wm, CURSORB) |
		   FW_WM(planeb_wm, PLANEB) |
		   FW_WM(planea_wm, PLANEA));
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   FW_WM(cursora_wm, CURSORA));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
		   FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i965_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;
	bool cxsr_enabled;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		const struct drm_display_mode *adjusted_mode =
			&to_intel_crtc(crtc)->config->base.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
		int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * crtc->cursor->state->crtc_w;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		/* Turn off self refresh if both pipes are enabled */
		intel_set_memory_cxsr(dev_priv, false);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
		   FW_WM(8, CURSORB) |
		   FW_WM(8, PLANEB) |
		   FW_WM(8, PLANEA));
	I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
		   FW_WM(8, PLANEC_OLD));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

#undef FW_WM
static void i9xx_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct drm_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i830_a_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = crtc->primary->state->fb->bits_per_pixel / 8;
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		enabled = crtc;
	} else {
		planea_wm = fifo_size - wm_info->guard_size;
		if (planea_wm > (long)wm_info->max_wm)
			planea_wm = wm_info->max_wm;
	}

	if (IS_GEN2(dev))
		wm_info = &i830_bc_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = crtc->primary->state->fb->bits_per_pixel / 8;
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else {
		planeb_wm = fifo_size - wm_info->guard_size;
		if (planeb_wm > (long)wm_info->max_wm)
			planeb_wm = wm_info->max_wm;
	}

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	if (IS_I915GM(dev) && enabled) {
		struct drm_i915_gem_object *obj;

		obj = intel_fb_obj(enabled->primary->state->fb);

		/* self-refresh seems busted with untiled */
		if (obj->tiling_mode == I915_TILING_NONE)
			enabled = NULL;
	}

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	intel_set_memory_cxsr(dev_priv, false);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		const struct drm_display_mode *adjusted_mode =
			&to_intel_crtc(enabled)->config->base.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w;
		int pixel_size = enabled->primary->state->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i845_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	uint32_t fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev);
	if (crtc == NULL)
		return;

	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
				       &i845_wm_info,
				       dev_priv->display.get_fifo_size(dev, 0),
				       4, pessimal_latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}
static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
				    struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pixel_rate;

	pixel_rate = intel_crtc->config->base.adjusted_mode.crtc_clock;

	/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
	 * adjust the pixel_rate here. */

	if (intel_crtc->config->pch_pfit.enabled) {
		uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
		uint32_t pfit_size = intel_crtc->config->pch_pfit.size;

		pipe_w = intel_crtc->config->pipe_src_w;
		pipe_h = intel_crtc->config->pipe_src_h;
		pfit_w = (pfit_size >> 16) & 0xFFFF;
		pfit_h = pfit_size & 0xFFFF;
		if (pipe_w < pfit_w)
			pipe_w = pfit_w;
		if (pipe_h < pfit_h)
			pipe_h = pfit_h;

		pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
				     pfit_w * pfit_h);
	}

	return pixel_rate;
}
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
			       uint32_t latency)
{
	uint64_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

	ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
	ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;

	return ret;
}

/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
			       uint32_t horiz_pixels, uint8_t bytes_per_pixel,
			       uint32_t latency)
{
	uint32_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
	ret = DIV_ROUND_UP(ret, 64) + 2;
	return ret;
}

static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
			   uint8_t bytes_per_pixel)
{
	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
}
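/*
 * Worked example for the two methods (illustrative numbers): pixel_rate =
 * 148500 kHz, bytes_per_pixel = 4, latency = 35 (i.e. 3.5us in 0.1us units):
 *   method1: DIV_ROUND_UP_ULL(148500 * 4 * 35, 64 * 10000) + 2 = 35
 *   method2 with pipe_htotal = 2200, horiz_pixels = 1920:
 *     lines = 35 * 148500 / (2200 * 10000) = 0, then
 *     DIV_ROUND_UP((0 + 1) * 1920 * 4, 64) + 2 = 122
 * The LP watermark code below takes min(method1, method2) for plane
 * watermarks.
 */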
struct skl_pipe_wm_parameters {
	bool active;
	uint32_t pipe_htotal;
	uint32_t pixel_rate; /* in KHz */
	struct intel_plane_wm_parameters plane[I915_MAX_PLANES];
	struct intel_plane_wm_parameters cursor;
};

struct ilk_pipe_wm_parameters {
	bool active;
	uint32_t pipe_htotal;
	uint32_t pixel_rate;
	struct intel_plane_wm_parameters pri;
	struct intel_plane_wm_parameters spr;
	struct intel_plane_wm_parameters cur;
};

struct ilk_wm_maximums {
	uint16_t pri;
	uint16_t spr;
	uint16_t cur;
	uint16_t fbc;
};

/* used in computing the new watermarks state */
struct intel_wm_config {
	unsigned int num_pipes_active;
	bool sprites_enabled;
	bool sprites_scaled;
};
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_pri_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value,
				   bool is_lp)
{
	uint32_t method1, method2;

	if (!params->active || !params->pri.enabled)
		return 0;

	method1 = ilk_wm_method1(params->pixel_rate,
				 params->pri.bytes_per_pixel,
				 mem_value);

	if (!is_lp)
		return method1;

	method2 = ilk_wm_method2(params->pixel_rate,
				 params->pipe_htotal,
				 params->pri.horiz_pixels,
				 params->pri.bytes_per_pixel,
				 mem_value);

	return min(method1, method2);
}

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_spr_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value)
{
	uint32_t method1, method2;

	if (!params->active || !params->spr.enabled)
		return 0;

	method1 = ilk_wm_method1(params->pixel_rate,
				 params->spr.bytes_per_pixel,
				 mem_value);
	method2 = ilk_wm_method2(params->pixel_rate,
				 params->pipe_htotal,
				 params->spr.horiz_pixels,
				 params->spr.bytes_per_pixel,
				 mem_value);
	return min(method1, method2);
}

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_cur_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value)
{
	if (!params->active || !params->cur.enabled)
		return 0;

	return ilk_wm_method2(params->pixel_rate,
			      params->pipe_htotal,
			      params->cur.horiz_pixels,
			      params->cur.bytes_per_pixel,
			      mem_value);
}

/* Only for WM_LP. */
static uint32_t ilk_compute_fbc_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t pri_val)
{
	if (!params->active || !params->pri.enabled)
		return 0;

	return ilk_wm_fbc(pri_val,
			  params->pri.horiz_pixels,
			  params->pri.bytes_per_pixel);
}
static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 8)
		return 3072;
	else if (INTEL_INFO(dev)->gen >= 7)
		return 768;
	else
		return 512;
}

static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
					 int level, bool is_sprite)
{
	if (INTEL_INFO(dev)->gen >= 8)
		/* BDW primary/sprite plane watermarks */
		return level == 0 ? 255 : 2047;
	else if (INTEL_INFO(dev)->gen >= 7)
		/* IVB/HSW primary/sprite plane watermarks */
		return level == 0 ? 127 : 1023;
	else if (!is_sprite)
		/* ILK/SNB primary plane watermarks */
		return level == 0 ? 127 : 511;
	else
		/* ILK/SNB sprite plane watermarks */
		return level == 0 ? 63 : 255;
}

static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev,
					  int level)
{
	if (INTEL_INFO(dev)->gen >= 7)
		return level == 0 ? 63 : 255;
	else
		return level == 0 ? 31 : 63;
}

static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 8)
		return 31;
	else
		return 15;
}
/* Calculate the maximum primary/sprite plane watermark */
static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
				     int level,
				     const struct intel_wm_config *config,
				     enum intel_ddb_partitioning ddb_partitioning,
				     bool is_sprite)
{
	unsigned int fifo_size = ilk_display_fifo_size(dev);

	/* if sprites aren't enabled, sprites get nothing */
	if (is_sprite && !config->sprites_enabled)
		return 0;

	/* HSW allows LP1+ watermarks even with multiple pipes */
	if (level == 0 || config->num_pipes_active > 1) {
		fifo_size /= INTEL_INFO(dev)->num_pipes;

		/*
		 * For some reason the non self refresh
		 * FIFO size is only half of the self
		 * refresh FIFO size on ILK/SNB.
		 */
		if (INTEL_INFO(dev)->gen <= 6)
			fifo_size /= 2;
	}

	if (config->sprites_enabled) {
		/* level 0 is always calculated with 1:1 split */
		if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
			if (is_sprite)
				fifo_size *= 5;
			fifo_size /= 6;
		} else {
			fifo_size /= 2;
		}
	}

	/* clamp to max that the registers can hold */
	return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite));
}

/* Calculate the maximum cursor plane watermark */
static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
				      int level,
				      const struct intel_wm_config *config)
{
	/* HSW LP1+ watermarks w/ multiple pipes */
	if (level > 0 && config->num_pipes_active > 1)
		return 64;

	/* otherwise just report max that registers can hold */
	return ilk_cursor_wm_reg_max(dev, level);
}

static void ilk_compute_wm_maximums(const struct drm_device *dev,
				    int level,
				    const struct intel_wm_config *config,
				    enum intel_ddb_partitioning ddb_partitioning,
				    struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
	max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
	max->cur = ilk_cursor_wm_max(dev, level, config);
	max->fbc = ilk_fbc_wm_reg_max(dev);
}

static void ilk_compute_wm_reg_maximums(struct drm_device *dev,
					int level,
					struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_reg_max(dev, level, false);
	max->spr = ilk_plane_wm_reg_max(dev, level, true);
	max->cur = ilk_cursor_wm_reg_max(dev, level);
	max->fbc = ilk_fbc_wm_reg_max(dev);
}
static bool ilk_validate_wm_level(int level,
				  const struct ilk_wm_maximums *max,
				  struct intel_wm_level *result)
{
	bool ret;

	/* already determined to be invalid? */
	if (!result->enable)
		return false;

	result->enable = result->pri_val <= max->pri &&
			 result->spr_val <= max->spr &&
			 result->cur_val <= max->cur;

	ret = result->enable;

	/*
	 * HACK until we can pre-compute everything,
	 * and thus fail gracefully if LP0 watermarks
	 * are exceeded...
	 */
	if (level == 0 && !result->enable) {
		if (result->pri_val > max->pri)
			DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
				      level, result->pri_val, max->pri);
		if (result->spr_val > max->spr)
			DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
				      level, result->spr_val, max->spr);
		if (result->cur_val > max->cur)
			DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
				      level, result->cur_val, max->cur);

		result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
		result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
		result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
		result->enable = true;
	}

	return ret;
}
static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
				 int level,
				 const struct ilk_pipe_wm_parameters *p,
				 struct intel_wm_level *result)
{
	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
	uint16_t spr_latency = dev_priv->wm.spr_latency[level];
	uint16_t cur_latency = dev_priv->wm.cur_latency[level];

	/* WM1+ latency values stored in 0.5us units */
	if (level > 0) {
		pri_latency *= 5;
		spr_latency *= 5;
		cur_latency *= 5;
	}

	result->pri_val = ilk_compute_pri_wm(p, pri_latency, level);
	result->spr_val = ilk_compute_spr_wm(p, spr_latency);
	result->cur_val = ilk_compute_cur_wm(p, cur_latency);
	result->fbc_val = ilk_compute_fbc_wm(p, result->pri_val);
	result->enable = true;
}
static uint32_t
hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_display_mode *mode = &intel_crtc->config->base.adjusted_mode;
	u32 linetime, ips_linetime;

	if (!intel_crtc->active)
		return 0;

	/* The WM is computed based on how long it takes to fill a single
	 * row at the given clock rate, multiplied by 8.
	 */
	linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
				     mode->crtc_clock);
	ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
					 dev_priv->display.get_display_clock_speed(dev_priv->dev));

	return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
	       PIPE_WM_LINETIME_TIME(linetime);
}
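/*
 * Worked example (illustrative numbers): crtc_htotal = 2200 and crtc_clock =
 * 148500 give linetime = DIV_ROUND_CLOSEST(2200 * 8000, 148500) = 119, i.e.
 * the line time in 1/8 us units (2200 / 148.5 MHz = 14.8us, times 8).
 */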
1824 static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
1826 struct drm_i915_private *dev_priv = dev->dev_private;
1828 if (IS_GEN9(dev)) {
1829 uint32_t val;
1830 int ret, i;
1831 int level, max_level = ilk_wm_max_level(dev);
1833 /* read the first set of memory latencies[0:3] */
1834 val = 0; /* data0 to be programmed to 0 for first set */
1835 mutex_lock(&dev_priv->rps.hw_lock);
1836 ret = sandybridge_pcode_read(dev_priv,
1837 GEN9_PCODE_READ_MEM_LATENCY,
1838 &val);
1839 mutex_unlock(&dev_priv->rps.hw_lock);
1841 if (ret) {
1842 DRM_ERROR("SKL Mailbox read error = %d\n", ret);
1843 return;
1846 wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
1847 wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
1848 GEN9_MEM_LATENCY_LEVEL_MASK;
1849 wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
1850 GEN9_MEM_LATENCY_LEVEL_MASK;
1851 wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
1852 GEN9_MEM_LATENCY_LEVEL_MASK;
1854 /* read the second set of memory latencies[4:7] */
1855 val = 1; /* data0 to be programmed to 1 for second set */
1856 mutex_lock(&dev_priv->rps.hw_lock);
1857 ret = sandybridge_pcode_read(dev_priv,
1858 GEN9_PCODE_READ_MEM_LATENCY,
1859 &val);
1860 mutex_unlock(&dev_priv->rps.hw_lock);
1861 if (ret) {
1862 DRM_ERROR("SKL Mailbox read error = %d\n", ret);
1863 return;
1866 wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
1867 wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
1868 GEN9_MEM_LATENCY_LEVEL_MASK;
1869 wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
1870 GEN9_MEM_LATENCY_LEVEL_MASK;
1871 wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
1872 GEN9_MEM_LATENCY_LEVEL_MASK;
1875 /* WaWmMemoryReadLatency:skl
1877  * punit doesn't take into account the read latency so we need
1878  * to add 2us to the various latency levels we retrieve from
1879  * the punit.
1880  * - W0 is a bit special in that it's the only level that
1881  *   can't be disabled if we want to have display working, so
1882  *   we always add 2us there.
1883  * - For levels >=1, punit returns 0us latency when they are
1884  *   disabled, so we respect that and don't add 2us then.
1886  * Additionally, if a level n (n > 1) has a 0us latency, all
1887  * levels m (m >= n) need to be disabled. We make sure to
1888  * sanitize the values out of the punit to satisfy this
1889  * requirement. */
1891 wm[0] += 2;
1892 for (level = 1; level <= max_level; level++)
1893 if (wm[level] != 0)
1894 wm[level] += 2;
1895 else {
1896 for (i = level + 1; i <= max_level; i++)
1897 wm[i] = 0;
1899 break;
1901 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
1902 uint64_t sskpd = I915_READ64(MCH_SSKPD);
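/* HSW/BDW pack all the latencies into the 64-bit SSKPD register: WM0
 * lives in bits 63:56, with the legacy 4-bit field in bits 3:0 used as
 * a fallback when the wide field reads 0; WM1-WM4 sit at the shifts
 * extracted below. */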
1904 wm[0] = (sskpd >> 56) & 0xFF;
1905 if (wm[0] == 0)
1906 wm[0] = sskpd & 0xF;
1907 wm[1] = (sskpd >> 4) & 0xFF;
1908 wm[2] = (sskpd >> 12) & 0xFF;
1909 wm[3] = (sskpd >> 20) & 0x1FF;
1910 wm[4] = (sskpd >> 32) & 0x1FF;
1911 } else if (INTEL_INFO(dev)->gen >= 6) {
1912 uint32_t sskpd = I915_READ(MCH_SSKPD);
1914 wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
1915 wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
1916 wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
1917 wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
1918 } else if (INTEL_INFO(dev)->gen >= 5) {
1919 uint32_t mltr = I915_READ(MLTR_ILK);
1921 /* ILK primary LP0 latency is 700 ns */
1922 wm[0] = 7;
1923 wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
1924 wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
1928 static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
1930 /* ILK sprite LP0 latency is 1300 ns */
1931 if (INTEL_INFO(dev)->gen == 5)
1932 wm[0] = 13;
1935 static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
1937 /* ILK cursor LP0 latency is 1300 ns */
1938 if (INTEL_INFO(dev)->gen == 5)
1939 wm[0] = 13;
1941 /* WaDoubleCursorLP3Latency:ivb */
1942 if (IS_IVYBRIDGE(dev))
1943 wm[3] *= 2;
1946 int ilk_wm_max_level(const struct drm_device *dev)
1948 /* how many WM levels are we expecting */
1949 if (INTEL_INFO(dev)->gen >= 9)
1950 return 7;
1951 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1952 return 4;
1953 else if (INTEL_INFO(dev)->gen >= 6)
1954 return 3;
1955 else
1956 return 2;
1959 static void intel_print_wm_latency(struct drm_device *dev,
1960 const char *name,
1961 const uint16_t wm[8])
1963 int level, max_level = ilk_wm_max_level(dev);
1965 for (level = 0; level <= max_level; level++) {
1966 unsigned int latency = wm[level];
1968 if (latency == 0) {
1969 DRM_ERROR("%s WM%d latency not provided\n",
1970 name, level);
1971 continue;
1975 /* - latencies are in us on gen9.
1976  * - before then, WM1+ latency values are in 0.5us units */
1978 if (IS_GEN9(dev))
1979 latency *= 10;
1980 else if (level > 0)
1981 latency *= 5;
1983 DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
1984 name, level, wm[level],
1985 latency / 10, latency % 10);
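/* Raise WM0 to at least 'min' (in 0.1us units, matching the WM0
 * encoding above) and apply the same floor to the WM1+ levels, which
 * are kept in 0.5us units, hence the DIV_ROUND_UP(min, 5) below. */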
1989 static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
1990 uint16_t wm[5], uint16_t min)
1992 int level, max_level = ilk_wm_max_level(dev_priv->dev);
1994 if (wm[0] >= min)
1995 return false;
1997 wm[0] = max(wm[0], min);
1998 for (level = 1; level <= max_level; level++)
1999 wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));
2001 return true;
2004 static void snb_wm_latency_quirk(struct drm_device *dev)
2006 struct drm_i915_private *dev_priv = dev->dev_private;
2007 bool changed;
2010 /* The BIOS provided WM memory latency values are often
2011  * inadequate for high resolution displays. Adjust them. */
2013 changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
2014 ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
2015 ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
2017 if (!changed)
2018 return;
2020 DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
2021 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2022 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2023 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
2026 static void ilk_setup_wm_latency(struct drm_device *dev)
2028 struct drm_i915_private *dev_priv = dev->dev_private;
2030 intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
2032 memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
2033 sizeof(dev_priv->wm.pri_latency));
2034 memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
2035 sizeof(dev_priv->wm.pri_latency));
2037 intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
2038 intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);
2040 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2041 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2042 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
2044 if (IS_GEN6(dev))
2045 snb_wm_latency_quirk(dev);
2048 static void skl_setup_wm_latency(struct drm_device *dev)
2050 struct drm_i915_private *dev_priv = dev->dev_private;
2052 intel_read_wm_latency(dev, dev_priv->wm.skl_latency);
2053 intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
2056 static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
2057 struct ilk_pipe_wm_parameters *p)
2059 struct drm_device *dev = crtc->dev;
2060 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2061 enum pipe pipe = intel_crtc->pipe;
2062 struct drm_plane *plane;
2064 if (!intel_crtc->active)
2065 return;
2067 p->active = true;
2068 p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal;
2069 p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
2071 if (crtc->primary->state->fb)
2072 p->pri.bytes_per_pixel =
2073 crtc->primary->state->fb->bits_per_pixel / 8;
2074 else
2075 p->pri.bytes_per_pixel = 4;
2077 p->cur.bytes_per_pixel = 4;
2079 /* TODO: for now, assume primary and cursor planes are always enabled.
2080  * Setting them to false makes the screen flicker. */
2082 p->pri.enabled = true;
2083 p->cur.enabled = true;
2085 p->pri.horiz_pixels = intel_crtc->config->pipe_src_w;
2086 p->cur.horiz_pixels = intel_crtc->base.cursor->state->crtc_w;
2088 drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
2089 struct intel_plane *intel_plane = to_intel_plane(plane);
2091 if (intel_plane->pipe == pipe) {
2092 p->spr = intel_plane->wm;
2093 break;
2098 static void ilk_compute_wm_config(struct drm_device *dev,
2099 struct intel_wm_config *config)
2101 struct intel_crtc *intel_crtc;
2103 /* Compute the currently _active_ config */
2104 for_each_intel_crtc(dev, intel_crtc) {
2105 const struct intel_pipe_wm *wm = &intel_crtc->wm.active;
2107 if (!wm->pipe_enabled)
2108 continue;
2110 config->sprites_enabled |= wm->sprites_enabled;
2111 config->sprites_scaled |= wm->sprites_scaled;
2112 config->num_pipes_active++;
2116 /* Compute new watermarks for the pipe */
2117 static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
2118 const struct ilk_pipe_wm_parameters *params,
2119 struct intel_pipe_wm *pipe_wm)
2121 struct drm_device *dev = crtc->dev;
2122 const struct drm_i915_private *dev_priv = dev->dev_private;
2123 int level, max_level = ilk_wm_max_level(dev);
2124 /* LP0 watermark maximums depend on this pipe alone */
2125 struct intel_wm_config config = {
2126 .num_pipes_active = 1,
2127 .sprites_enabled = params->spr.enabled,
2128 .sprites_scaled = params->spr.scaled,
2130 struct ilk_wm_maximums max;
2132 pipe_wm->pipe_enabled = params->active;
2133 pipe_wm->sprites_enabled = params->spr.enabled;
2134 pipe_wm->sprites_scaled = params->spr.scaled;
2136 /* ILK/SNB: LP2+ watermarks only w/o sprites */
2137 if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled)
2138 max_level = 1;
2140 /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
2141 if (params->spr.scaled)
2142 max_level = 0;
2144 ilk_compute_wm_level(dev_priv, 0, params, &pipe_wm->wm[0]);
2146 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2147 pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
2149 /* LP0 watermarks always use 1/2 DDB partitioning */
2150 ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
2152 /* At least LP0 must be valid */
2153 if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]))
2154 return false;
2156 ilk_compute_wm_reg_maximums(dev, 1, &max);
2158 for (level = 1; level <= max_level; level++) {
2159 struct intel_wm_level wm = {};
2161 ilk_compute_wm_level(dev_priv, level, params, &wm);
2164 /* Disable any watermark level that exceeds the
2165  * register maximums since such watermarks are
2166  * always invalid. */
2168 if (!ilk_validate_wm_level(level, &max, &wm))
2169 break;
2171 pipe_wm->wm[level] = wm;
2174 return true;
2178 /* Merge the watermarks from all active pipes for a specific level. */
2180 static void ilk_merge_wm_level(struct drm_device *dev,
2181 int level,
2182 struct intel_wm_level *ret_wm)
2184 const struct intel_crtc *intel_crtc;
2186 ret_wm->enable = true;
2188 for_each_intel_crtc(dev, intel_crtc) {
2189 const struct intel_pipe_wm *active = &intel_crtc->wm.active;
2190 const struct intel_wm_level *wm = &active->wm[level];
2192 if (!active->pipe_enabled)
2193 continue;
2196 /* The watermark values may have been used in the past,
2197  * so we must maintain them in the registers for some
2198  * time even if the level is now disabled. */
2200 if (!wm->enable)
2201 ret_wm->enable = false;
2203 ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
2204 ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
2205 ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
2206 ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
2211 /* Merge all low power watermarks for all active pipes. */
2213 static void ilk_wm_merge(struct drm_device *dev,
2214 const struct intel_wm_config *config,
2215 const struct ilk_wm_maximums *max,
2216 struct intel_pipe_wm *merged)
2218 int level, max_level = ilk_wm_max_level(dev);
2219 int last_enabled_level = max_level;
2221 /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
2222 if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
2223 config->num_pipes_active > 1)
2224 return;
2226 /* ILK: FBC WM must be disabled always */
2227 merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;
2229 /* merge each WM1+ level */
2230 for (level = 1; level <= max_level; level++) {
2231 struct intel_wm_level *wm = &merged->wm[level];
2233 ilk_merge_wm_level(dev, level, wm);
2235 if (level > last_enabled_level)
2236 wm->enable = false;
2237 else if (!ilk_validate_wm_level(level, max, wm))
2238 /* make sure all following levels get disabled */
2239 last_enabled_level = level - 1;
2242 /* The spec says it is preferred to disable
2243  * FBC WMs instead of disabling a WM level. */
2245 if (wm->fbc_val > max->fbc) {
2246 if (wm->enable)
2247 merged->fbc_wm_enabled = false;
2248 wm->fbc_val = 0;
2252 /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
2254 /* FIXME this is racy. FBC might get enabled later.
2255  * What we should check here is whether FBC can be
2256  * enabled sometime later. */
2258 if (IS_GEN5(dev) && !merged->fbc_wm_enabled && intel_fbc_enabled(dev)) {
2259 for (level = 2; level <= max_level; level++) {
2260 struct intel_wm_level *wm = &merged->wm[level];
2262 wm->enable = false;
2267 static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
2269 /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
2270 return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
2273 /* The value we need to program into the WM_LPx latency field */
2274 static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
2276 struct drm_i915_private *dev_priv = dev->dev_private;
2278 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2279 return 2 * level;
2280 else
2281 return dev_priv->wm.pri_latency[level];
2284 static void ilk_compute_wm_results(struct drm_device *dev,
2285 const struct intel_pipe_wm *merged,
2286 enum intel_ddb_partitioning partitioning,
2287 struct ilk_wm_values *results)
2289 struct intel_crtc *intel_crtc;
2290 int level, wm_lp;
2292 results->enable_fbc_wm = merged->fbc_wm_enabled;
2293 results->partitioning = partitioning;
2295 /* LP1+ register values */
2296 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2297 const struct intel_wm_level *r;
2299 level = ilk_wm_lp_to_level(wm_lp, merged);
2301 r = &merged->wm[level];
2304 /* Maintain the watermark values even if the level is
2305  * disabled. Doing otherwise could cause underruns. */
2307 results->wm_lp[wm_lp - 1] =
2308 (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
2309 (r->pri_val << WM1_LP_SR_SHIFT) |
2310 r->cur_val;
2312 if (r->enable)
2313 results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
2315 if (INTEL_INFO(dev)->gen >= 8)
2316 results->wm_lp[wm_lp - 1] |=
2317 r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
2318 else
2319 results->wm_lp[wm_lp - 1] |=
2320 r->fbc_val << WM1_LP_FBC_SHIFT;
2323 /* Always set WM1S_LP_EN when spr_val != 0, even if the
2324  * level is disabled. Doing otherwise could cause underruns. */
2326 if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
2327 WARN_ON(wm_lp != 1);
2328 results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
2329 } else
2330 results->wm_lp_spr[wm_lp - 1] = r->spr_val;
2333 /* LP0 register values */
2334 for_each_intel_crtc(dev, intel_crtc) {
2335 enum pipe pipe = intel_crtc->pipe;
2336 const struct intel_wm_level *r =
2337 &intel_crtc->wm.active.wm[0];
2339 if (WARN_ON(!r->enable))
2340 continue;
2342 results->wm_linetime[pipe] = intel_crtc->wm.active.linetime;
2344 results->wm_pipe[pipe] =
2345 (r->pri_val << WM0_PIPE_PLANE_SHIFT) |
2346 (r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
2347 r->cur_val;
2351 /* Find the result with the highest level enabled. Check for enable_fbc_wm in
2352 * case both are at the same level. Prefer r1 in case they're the same. */
2353 static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
2354 struct intel_pipe_wm *r1,
2355 struct intel_pipe_wm *r2)
2357 int level, max_level = ilk_wm_max_level(dev);
2358 int level1 = 0, level2 = 0;
2360 for (level = 1; level <= max_level; level++) {
2361 if (r1->wm[level].enable)
2362 level1 = level;
2363 if (r2->wm[level].enable)
2364 level2 = level;
2367 if (level1 == level2) {
2368 if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
2369 return r2;
2370 else
2371 return r1;
2372 } else if (level1 > level2) {
2373 return r1;
2374 } else {
2375 return r2;
2379 /* dirty bits used to track which watermarks need changes */
2380 #define WM_DIRTY_PIPE(pipe) (1 << (pipe))
2381 #define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
2382 #define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
2383 #define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
2384 #define WM_DIRTY_FBC (1 << 24)
2385 #define WM_DIRTY_DDB (1 << 25)
2387 static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
2388 const struct ilk_wm_values *old,
2389 const struct ilk_wm_values *new)
2391 unsigned int dirty = 0;
2392 enum pipe pipe;
2393 int wm_lp;
2395 for_each_pipe(dev_priv, pipe) {
2396 if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
2397 dirty |= WM_DIRTY_LINETIME(pipe);
2398 /* Must disable LP1+ watermarks too */
2399 dirty |= WM_DIRTY_LP_ALL;
2402 if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
2403 dirty |= WM_DIRTY_PIPE(pipe);
2404 /* Must disable LP1+ watermarks too */
2405 dirty |= WM_DIRTY_LP_ALL;
2409 if (old->enable_fbc_wm != new->enable_fbc_wm) {
2410 dirty |= WM_DIRTY_FBC;
2411 /* Must disable LP1+ watermarks too */
2412 dirty |= WM_DIRTY_LP_ALL;
2415 if (old->partitioning != new->partitioning) {
2416 dirty |= WM_DIRTY_DDB;
2417 /* Must disable LP1+ watermarks too */
2418 dirty |= WM_DIRTY_LP_ALL;
2421 /* LP1+ watermarks already deemed dirty, no need to continue */
2422 if (dirty & WM_DIRTY_LP_ALL)
2423 return dirty;
2425 /* Find the lowest numbered LP1+ watermark in need of an update... */
2426 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2427 if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
2428 old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
2429 break;
2432 /* ...and mark it and all higher numbered LP1+ watermarks as dirty */
2433 for (; wm_lp <= 3; wm_lp++)
2434 dirty |= WM_DIRTY_LP(wm_lp);
2436 return dirty;
2439 static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
2440 unsigned int dirty)
2442 struct ilk_wm_values *previous = &dev_priv->wm.hw;
2443 bool changed = false;
2445 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
2446 previous->wm_lp[2] &= ~WM1_LP_SR_EN;
2447 I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
2448 changed = true;
2450 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
2451 previous->wm_lp[1] &= ~WM1_LP_SR_EN;
2452 I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
2453 changed = true;
2455 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
2456 previous->wm_lp[0] &= ~WM1_LP_SR_EN;
2457 I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
2458 changed = true;
2462 /* Don't touch WM1S_LP_EN here.
2463  * Doing so could cause underruns. */
2466 return changed;
2470 /* The spec says we shouldn't write when we don't need to, because every
2471  * write causes WMs to be re-evaluated, expending some power. */
2473 static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
2474 struct ilk_wm_values *results)
2476 struct drm_device *dev = dev_priv->dev;
2477 struct ilk_wm_values *previous = &dev_priv->wm.hw;
2478 unsigned int dirty;
2479 uint32_t val;
2481 dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
2482 if (!dirty)
2483 return;
2485 _ilk_disable_lp_wm(dev_priv, dirty);
2487 if (dirty & WM_DIRTY_PIPE(PIPE_A))
2488 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
2489 if (dirty & WM_DIRTY_PIPE(PIPE_B))
2490 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
2491 if (dirty & WM_DIRTY_PIPE(PIPE_C))
2492 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
2494 if (dirty & WM_DIRTY_LINETIME(PIPE_A))
2495 I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
2496 if (dirty & WM_DIRTY_LINETIME(PIPE_B))
2497 I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
2498 if (dirty & WM_DIRTY_LINETIME(PIPE_C))
2499 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
2501 if (dirty & WM_DIRTY_DDB) {
2502 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2503 val = I915_READ(WM_MISC);
2504 if (results->partitioning == INTEL_DDB_PART_1_2)
2505 val &= ~WM_MISC_DATA_PARTITION_5_6;
2506 else
2507 val |= WM_MISC_DATA_PARTITION_5_6;
2508 I915_WRITE(WM_MISC, val);
2509 } else {
2510 val = I915_READ(DISP_ARB_CTL2);
2511 if (results->partitioning == INTEL_DDB_PART_1_2)
2512 val &= ~DISP_DATA_PARTITION_5_6;
2513 else
2514 val |= DISP_DATA_PARTITION_5_6;
2515 I915_WRITE(DISP_ARB_CTL2, val);
2519 if (dirty & WM_DIRTY_FBC) {
2520 val = I915_READ(DISP_ARB_CTL);
2521 if (results->enable_fbc_wm)
2522 val &= ~DISP_FBC_WM_DIS;
2523 else
2524 val |= DISP_FBC_WM_DIS;
2525 I915_WRITE(DISP_ARB_CTL, val);
2528 if (dirty & WM_DIRTY_LP(1) &&
2529 previous->wm_lp_spr[0] != results->wm_lp_spr[0])
2530 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
2532 if (INTEL_INFO(dev)->gen >= 7) {
2533 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
2534 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
2535 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
2536 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
2539 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
2540 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
2541 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
2542 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
2543 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
2544 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
2546 dev_priv->wm.hw = *results;
2549 static bool ilk_disable_lp_wm(struct drm_device *dev)
2551 struct drm_i915_private *dev_priv = dev->dev_private;
2553 return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
2557 /* On gen9, we need to allocate Display Data Buffer (DDB) portions to the
2558  * different active planes. */
2561 #define SKL_DDB_SIZE 896 /* in blocks */
2562 #define BXT_DDB_SIZE 512
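/* The DDB, minus 4 blocks reserved for the bypass path, is split evenly
 * between the active pipes; a pipe's slice is selected by its position
 * among the active pipes (nth_active_pipe below). */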
2564 static void
2565 skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
2566 struct drm_crtc *for_crtc,
2567 const struct intel_wm_config *config,
2568 const struct skl_pipe_wm_parameters *params,
2569 struct skl_ddb_entry *alloc /* out */)
2571 struct drm_crtc *crtc;
2572 unsigned int pipe_size, ddb_size;
2573 int nth_active_pipe;
2575 if (!params->active) {
2576 alloc->start = 0;
2577 alloc->end = 0;
2578 return;
2581 if (IS_BROXTON(dev))
2582 ddb_size = BXT_DDB_SIZE;
2583 else
2584 ddb_size = SKL_DDB_SIZE;
2586 ddb_size -= 4; /* 4 blocks for bypass path allocation */
2588 nth_active_pipe = 0;
2589 for_each_crtc(dev, crtc) {
2590 if (!to_intel_crtc(crtc)->active)
2591 continue;
2593 if (crtc == for_crtc)
2594 break;
2596 nth_active_pipe++;
2599 pipe_size = ddb_size / config->num_pipes_active;
2600 alloc->start = nth_active_pipe * ddb_size / config->num_pipes_active;
2601 alloc->end = alloc->start + pipe_size;
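/* The cursor gets a fixed allocation carved from the end of the pipe's
 * DDB space (see skl_allocate_pipe_ddb()): 32 blocks when only one pipe
 * is active, 8 blocks otherwise. */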
2604 static unsigned int skl_cursor_allocation(const struct intel_wm_config *config)
2606 if (config->num_pipes_active == 1)
2607 return 32;
2609 return 8;
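/* PLANE_BUF_CFG/CUR_BUF_CFG encode the allocation with the start block
 * in bits 9:0 and the end block in bits 25:16. The hardware end is
 * inclusive, so the +1 converts to the exclusive convention used by
 * struct skl_ddb_entry. */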
2612 static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
2614 entry->start = reg & 0x3ff;
2615 entry->end = (reg >> 16) & 0x3ff;
2616 if (entry->end)
2617 entry->end += 1;
2620 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
2621 struct skl_ddb_allocation *ddb /* out */)
2623 enum pipe pipe;
2624 int plane;
2625 u32 val;
2627 for_each_pipe(dev_priv, pipe) {
2628 for_each_plane(dev_priv, pipe, plane) {
2629 val = I915_READ(PLANE_BUF_CFG(pipe, plane));
2630 skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane],
2631 val);
2634 val = I915_READ(CUR_BUF_CFG(pipe));
2635 skl_ddb_entry_init_from_hw(&ddb->cursor[pipe], val);
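/* The relative data rate is simply pixels x bytes-per-pixel fetched per
 * frame. For NV12 the y-plane is full resolution while the uv-plane is
 * subsampled 2x2, which is why the uv rate below uses half the width
 * and half the height. */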
2639 static unsigned int
2640 skl_plane_relative_data_rate(const struct intel_plane_wm_parameters *p, int y)
2643 /* for planar format */
2644 if (p->y_bytes_per_pixel) {
2645 if (y) /* y-plane data rate */
2646 return p->horiz_pixels * p->vert_pixels * p->y_bytes_per_pixel;
2647 else /* uv-plane data rate */
2648 return (p->horiz_pixels/2) * (p->vert_pixels/2) * p->bytes_per_pixel;
2651 /* for packed formats */
2652 return p->horiz_pixels * p->vert_pixels * p->bytes_per_pixel;
2656 /* We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
2657  * an 8192x4096@32bpp framebuffer:
2658  *   3 * 4096 * 8192 * 4 < 2^32 */
2660 static unsigned int
2661 skl_get_total_relative_data_rate(struct intel_crtc *intel_crtc,
2662 const struct skl_pipe_wm_parameters *params)
2664 unsigned int total_data_rate = 0;
2665 int plane;
2667 for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) {
2668 const struct intel_plane_wm_parameters *p;
2670 p = &params->plane[plane];
2671 if (!p->enabled)
2672 continue;
2674 total_data_rate += skl_plane_relative_data_rate(p, 0); /* packed/uv */
2675 if (p->y_bytes_per_pixel) {
2676 total_data_rate += skl_plane_relative_data_rate(p, 1); /* y-plane */
2680 return total_data_rate;
2683 static void
2684 skl_allocate_pipe_ddb(struct drm_crtc *crtc,
2685 const struct intel_wm_config *config,
2686 const struct skl_pipe_wm_parameters *params,
2687 struct skl_ddb_allocation *ddb /* out */)
2689 struct drm_device *dev = crtc->dev;
2690 struct drm_i915_private *dev_priv = dev->dev_private;
2691 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2692 enum pipe pipe = intel_crtc->pipe;
2693 struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
2694 uint16_t alloc_size, start, cursor_blocks;
2695 uint16_t minimum[I915_MAX_PLANES];
2696 uint16_t y_minimum[I915_MAX_PLANES];
2697 unsigned int total_data_rate;
2698 int plane;
2700 skl_ddb_get_pipe_allocation_limits(dev, crtc, config, params, alloc);
2701 alloc_size = skl_ddb_entry_size(alloc);
2702 if (alloc_size == 0) {
2703 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
2704 memset(&ddb->cursor[pipe], 0, sizeof(ddb->cursor[pipe]));
2705 return;
2708 cursor_blocks = skl_cursor_allocation(config);
2709 ddb->cursor[pipe].start = alloc->end - cursor_blocks;
2710 ddb->cursor[pipe].end = alloc->end;
2712 alloc_size -= cursor_blocks;
2713 alloc->end -= cursor_blocks;
2715 /* 1. Allocate the minimum required blocks for each active plane */
2716 for_each_plane(dev_priv, pipe, plane) {
2717 const struct intel_plane_wm_parameters *p;
2719 p = &params->plane[plane];
2720 if (!p->enabled)
2721 continue;
2723 minimum[plane] = 8;
2724 alloc_size -= minimum[plane];
2725 y_minimum[plane] = p->y_bytes_per_pixel ? 8 : 0;
2726 alloc_size -= y_minimum[plane];
2730 /* 2. Distribute the remaining space in proportion to the amount of
2731  * data each plane needs to fetch from memory.
2733  * FIXME: we may not allocate every single block here. */
2735 total_data_rate = skl_get_total_relative_data_rate(intel_crtc, params);
2737 start = alloc->start;
2738 for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) {
2739 const struct intel_plane_wm_parameters *p;
2740 unsigned int data_rate, y_data_rate;
2741 uint16_t plane_blocks, y_plane_blocks = 0;
2743 p = &params->plane[plane];
2744 if (!p->enabled)
2745 continue;
2747 data_rate = skl_plane_relative_data_rate(p, 0);
2750 /* allocation for (packed formats) or (uv-plane part of planar format):
2751  * promote the expression to 64 bits to avoid overflowing; the result
2752  * is less than the available space since data_rate / total_data_rate < 1 */
2754 plane_blocks = minimum[plane];
2755 plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
2756 total_data_rate);
2758 ddb->plane[pipe][plane].start = start;
2759 ddb->plane[pipe][plane].end = start + plane_blocks;
2761 start += plane_blocks;
2764 /* allocation for y_plane part of planar format */
2766 if (p->y_bytes_per_pixel) {
2767 y_data_rate = skl_plane_relative_data_rate(p, 1);
2768 y_plane_blocks = y_minimum[plane];
2769 y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
2770 total_data_rate);
2772 ddb->y_plane[pipe][plane].start = start;
2773 ddb->y_plane[pipe][plane].end = start + y_plane_blocks;
2775 start += y_plane_blocks;
2782 static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config)
2784 /* TODO: Take into account the scalers once we support them */
2785 return config->base.adjusted_mode.crtc_clock;
2789 /* The max latency should be 257 (max the punit can code is 255 and we add 2us
2790  * for the read latency) and bytes_per_pixel should always be <= 8, so that
2791  * should allow pixel_rate up to ~2 GHz which seems sufficient since max
2792  * 2xcdclk is 1350 MHz and the pixel rate should never exceed that. */
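/* Method 1 sizes the WM as the number of 512-byte blocks fetched while
 * waiting out the memory latency: latency (us) x pixel_rate (kHz) x Bpp,
 * with /512 converting bytes to blocks and the final /1000 cancelling
 * the kHz scaling. E.g. 5us at 148500 kHz and 4 Bpp gives
 * 5 * 148500 * 4 / 512 = 5800, DIV_ROUND_UP(5800, 1000) = 6 blocks. */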
2794 static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
2795 uint32_t latency)
2797 uint32_t wm_intermediate_val, ret;
2799 if (latency == 0)
2800 return UINT_MAX;
2802 wm_intermediate_val = latency * pixel_rate * bytes_per_pixel / 512;
2803 ret = DIV_ROUND_UP(wm_intermediate_val, 1000);
2805 return ret;
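/* Method 2 works per scanline instead: it multiplies the number of lines
 * spanned by the latency by the blocks needed per line. Y-tiled surfaces
 * are fetched in 4-line units, so bytes-per-line is evaluated over 4
 * lines and the resulting block count divided back by 4. */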
2808 static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
2809 uint32_t horiz_pixels, uint8_t bytes_per_pixel,
2810 uint64_t tiling, uint32_t latency)
2812 uint32_t ret;
2813 uint32_t plane_bytes_per_line, plane_blocks_per_line;
2814 uint32_t wm_intermediate_val;
2816 if (latency == 0)
2817 return UINT_MAX;
2819 plane_bytes_per_line = horiz_pixels * bytes_per_pixel;
2821 if (tiling == I915_FORMAT_MOD_Y_TILED ||
2822 tiling == I915_FORMAT_MOD_Yf_TILED) {
2823 plane_bytes_per_line *= 4;
2824 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
2825 plane_blocks_per_line /= 4;
2826 } else {
2827 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
2830 wm_intermediate_val = latency * pixel_rate;
2831 ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
2832 plane_blocks_per_line;
2834 return ret;
2837 static bool skl_ddb_allocation_changed(const struct skl_ddb_allocation *new_ddb,
2838 const struct intel_crtc *intel_crtc)
2840 struct drm_device *dev = intel_crtc->base.dev;
2841 struct drm_i915_private *dev_priv = dev->dev_private;
2842 const struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
2843 enum pipe pipe = intel_crtc->pipe;
2845 if (memcmp(new_ddb->plane[pipe], cur_ddb->plane[pipe],
2846 sizeof(new_ddb->plane[pipe])))
2847 return true;
2849 if (memcmp(&new_ddb->cursor[pipe], &cur_ddb->cursor[pipe],
2850 sizeof(new_ddb->cursor[pipe])))
2851 return true;
2853 return false;
2856 static void skl_compute_wm_global_parameters(struct drm_device *dev,
2857 struct intel_wm_config *config)
2859 struct drm_crtc *crtc;
2860 struct drm_plane *plane;
2862 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
2863 config->num_pipes_active += to_intel_crtc(crtc)->active;
2865 /* FIXME: I don't think we need those two global parameters on SKL */
2866 list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
2867 struct intel_plane *intel_plane = to_intel_plane(plane);
2869 config->sprites_enabled |= intel_plane->wm.enabled;
2870 config->sprites_scaled |= intel_plane->wm.scaled;
2874 static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
2875 struct skl_pipe_wm_parameters *p)
2877 struct drm_device *dev = crtc->dev;
2878 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2879 enum pipe pipe = intel_crtc->pipe;
2880 struct drm_plane *plane;
2881 struct drm_framebuffer *fb;
2882 int i = 1; /* sprite plane entries start at index 1 */
2884 p->active = intel_crtc->active;
2885 if (p->active) {
2886 p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal;
2887 p->pixel_rate = skl_pipe_pixel_rate(intel_crtc->config);
2889 fb = crtc->primary->state->fb;
2890 /* For planar: Bpp is for uv plane, y_Bpp is for y plane */
2891 if (fb) {
2892 p->plane[0].enabled = true;
2893 p->plane[0].bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ?
2894 drm_format_plane_cpp(fb->pixel_format, 1) : fb->bits_per_pixel / 8;
2895 p->plane[0].y_bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ?
2896 drm_format_plane_cpp(fb->pixel_format, 0) : 0;
2897 p->plane[0].tiling = fb->modifier[0];
2898 } else {
2899 p->plane[0].enabled = false;
2900 p->plane[0].bytes_per_pixel = 0;
2901 p->plane[0].y_bytes_per_pixel = 0;
2902 p->plane[0].tiling = DRM_FORMAT_MOD_NONE;
2904 p->plane[0].horiz_pixels = intel_crtc->config->pipe_src_w;
2905 p->plane[0].vert_pixels = intel_crtc->config->pipe_src_h;
2906 p->plane[0].rotation = crtc->primary->state->rotation;
2908 fb = crtc->cursor->state->fb;
2909 p->cursor.y_bytes_per_pixel = 0;
2910 if (fb) {
2911 p->cursor.enabled = true;
2912 p->cursor.bytes_per_pixel = fb->bits_per_pixel / 8;
2913 p->cursor.horiz_pixels = crtc->cursor->state->crtc_w;
2914 p->cursor.vert_pixels = crtc->cursor->state->crtc_h;
2915 } else {
2916 p->cursor.enabled = false;
2917 p->cursor.bytes_per_pixel = 0;
2918 p->cursor.horiz_pixels = 64;
2919 p->cursor.vert_pixels = 64;
2923 list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
2924 struct intel_plane *intel_plane = to_intel_plane(plane);
2926 if (intel_plane->pipe == pipe &&
2927 plane->type == DRM_PLANE_TYPE_OVERLAY)
2928 p->plane[i++] = intel_plane->wm;
2932 static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
2933 struct skl_pipe_wm_parameters *p,
2934 struct intel_plane_wm_parameters *p_params,
2935 uint16_t ddb_allocation,
2936 int level,
2937 uint16_t *out_blocks, /* out */
2938 uint8_t *out_lines /* out */)
2940 uint32_t latency = dev_priv->wm.skl_latency[level];
2941 uint32_t method1, method2;
2942 uint32_t plane_bytes_per_line, plane_blocks_per_line;
2943 uint32_t res_blocks, res_lines;
2944 uint32_t selected_result;
2945 uint8_t bytes_per_pixel;
2947 if (latency == 0 || !p->active || !p_params->enabled)
2948 return false;
2950 bytes_per_pixel = p_params->y_bytes_per_pixel ?
2951 p_params->y_bytes_per_pixel :
2952 p_params->bytes_per_pixel;
2953 method1 = skl_wm_method1(p->pixel_rate,
2954 bytes_per_pixel,
2955 latency);
2956 method2 = skl_wm_method2(p->pixel_rate,
2957 p->pipe_htotal,
2958 p_params->horiz_pixels,
2959 bytes_per_pixel,
2960 p_params->tiling,
2961 latency);
2963 plane_bytes_per_line = p_params->horiz_pixels * bytes_per_pixel;
2964 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
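	/* Y-tiled surfaces must cover at least min_scanlines worth of blocks;
	 * 90/270 rotation fetches vertical strips, so the minimum grows as the
	 * pixel size shrinks. Linear surfaces may take the smaller of the two
	 * methods whenever at least one full line fits in the allocation. */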
2966 if (p_params->tiling == I915_FORMAT_MOD_Y_TILED ||
2967 p_params->tiling == I915_FORMAT_MOD_Yf_TILED) {
2968 uint32_t min_scanlines = 4;
2969 uint32_t y_tile_minimum;
2970 if (intel_rotation_90_or_270(p_params->rotation)) {
2971 switch (p_params->bytes_per_pixel) {
2972 case 1:
2973 min_scanlines = 16;
2974 break;
2975 case 2:
2976 min_scanlines = 8;
2977 break;
2978 case 8:
2979 WARN(1, "Unsupported pixel depth for rotation");
2982 y_tile_minimum = plane_blocks_per_line * min_scanlines;
2983 selected_result = max(method2, y_tile_minimum);
2984 } else {
2985 if ((ddb_allocation / plane_blocks_per_line) >= 1)
2986 selected_result = min(method1, method2);
2987 else
2988 selected_result = method1;
2991 res_blocks = selected_result + 1;
2992 res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line);
2994 if (level >= 1 && level <= 7) {
2995 if (p_params->tiling == I915_FORMAT_MOD_Y_TILED ||
2996 p_params->tiling == I915_FORMAT_MOD_Yf_TILED)
2997 res_lines += 4;
2998 else
2999 res_blocks++;
3002 if (res_blocks >= ddb_allocation || res_lines > 31)
3003 return false;
3005 *out_blocks = res_blocks;
3006 *out_lines = res_lines;
3008 return true;
3011 static void skl_compute_wm_level(const struct drm_i915_private *dev_priv,
3012 struct skl_ddb_allocation *ddb,
3013 struct skl_pipe_wm_parameters *p,
3014 enum pipe pipe,
3015 int level,
3016 int num_planes,
3017 struct skl_wm_level *result)
3019 uint16_t ddb_blocks;
3020 int i;
3022 for (i = 0; i < num_planes; i++) {
3023 ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
3025 result->plane_en[i] = skl_compute_plane_wm(dev_priv,
3026 p, &p->plane[i],
3027 ddb_blocks,
3028 level,
3029 &result->plane_res_b[i],
3030 &result->plane_res_l[i]);
3033 ddb_blocks = skl_ddb_entry_size(&ddb->cursor[pipe]);
3034 result->cursor_en = skl_compute_plane_wm(dev_priv, p, &p->cursor,
3035 ddb_blocks, level,
3036 &result->cursor_res_b,
3037 &result->cursor_res_l);
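/* Returns the line time in units of 1/8 us (hence the x8): e.g. with
 * htotal = 2200 and a 148500 kHz pixel rate,
 * DIV_ROUND_UP(8 * 2200 * 1000, 148500) = 119, i.e. 14.875 us. */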
3040 static uint32_t
3041 skl_compute_linetime_wm(struct drm_crtc *crtc, struct skl_pipe_wm_parameters *p)
3043 if (!to_intel_crtc(crtc)->active)
3044 return 0;
3046 return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate);
3050 static void skl_compute_transition_wm(struct drm_crtc *crtc,
3051 struct skl_pipe_wm_parameters *params,
3052 struct skl_wm_level *trans_wm /* out */)
3054 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3055 int i;
3057 if (!params->active)
3058 return;
3060 /* Until we know more, just disable transition WMs */
3061 for (i = 0; i < intel_num_planes(intel_crtc); i++)
3062 trans_wm->plane_en[i] = false;
3063 trans_wm->cursor_en = false;
3066 static void skl_compute_pipe_wm(struct drm_crtc *crtc,
3067 struct skl_ddb_allocation *ddb,
3068 struct skl_pipe_wm_parameters *params,
3069 struct skl_pipe_wm *pipe_wm)
3071 struct drm_device *dev = crtc->dev;
3072 const struct drm_i915_private *dev_priv = dev->dev_private;
3073 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3074 int level, max_level = ilk_wm_max_level(dev);
3076 for (level = 0; level <= max_level; level++) {
3077 skl_compute_wm_level(dev_priv, ddb, params, intel_crtc->pipe,
3078 level, intel_num_planes(intel_crtc),
3079 &pipe_wm->wm[level]);
3081 pipe_wm->linetime = skl_compute_linetime_wm(crtc, params);
3083 skl_compute_transition_wm(crtc, params, &pipe_wm->trans_wm);
3086 static void skl_compute_wm_results(struct drm_device *dev,
3087 struct skl_pipe_wm_parameters *p,
3088 struct skl_pipe_wm *p_wm,
3089 struct skl_wm_values *r,
3090 struct intel_crtc *intel_crtc)
3092 int level, max_level = ilk_wm_max_level(dev);
3093 enum pipe pipe = intel_crtc->pipe;
3094 uint32_t temp;
3095 int i;
3097 for (level = 0; level <= max_level; level++) {
3098 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
3099 temp = 0;
3101 temp |= p_wm->wm[level].plane_res_l[i] <<
3102 PLANE_WM_LINES_SHIFT;
3103 temp |= p_wm->wm[level].plane_res_b[i];
3104 if (p_wm->wm[level].plane_en[i])
3105 temp |= PLANE_WM_EN;
3107 r->plane[pipe][i][level] = temp;
3110 temp = 0;
3112 temp |= p_wm->wm[level].cursor_res_l << PLANE_WM_LINES_SHIFT;
3113 temp |= p_wm->wm[level].cursor_res_b;
3115 if (p_wm->wm[level].cursor_en)
3116 temp |= PLANE_WM_EN;
3118 r->cursor[pipe][level] = temp;
3122 /* transition WMs */
3123 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
3124 temp = 0;
3125 temp |= p_wm->trans_wm.plane_res_l[i] << PLANE_WM_LINES_SHIFT;
3126 temp |= p_wm->trans_wm.plane_res_b[i];
3127 if (p_wm->trans_wm.plane_en[i])
3128 temp |= PLANE_WM_EN;
3130 r->plane_trans[pipe][i] = temp;
3133 temp = 0;
3134 temp |= p_wm->trans_wm.cursor_res_l << PLANE_WM_LINES_SHIFT;
3135 temp |= p_wm->trans_wm.cursor_res_b;
3136 if (p_wm->trans_wm.cursor_en)
3137 temp |= PLANE_WM_EN;
3139 r->cursor_trans[pipe] = temp;
3141 r->wm_linetime[pipe] = p_wm->linetime;
3144 static void skl_ddb_entry_write(struct drm_i915_private *dev_priv, uint32_t reg,
3145 const struct skl_ddb_entry *entry)
3147 if (entry->end)
3148 I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
3149 else
3150 I915_WRITE(reg, 0);
3153 static void skl_write_wm_values(struct drm_i915_private *dev_priv,
3154 const struct skl_wm_values *new)
3156 struct drm_device *dev = dev_priv->dev;
3157 struct intel_crtc *crtc;
3159 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
3160 int i, level, max_level = ilk_wm_max_level(dev);
3161 enum pipe pipe = crtc->pipe;
3163 if (!new->dirty[pipe])
3164 continue;
3166 I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]);
3168 for (level = 0; level <= max_level; level++) {
3169 for (i = 0; i < intel_num_planes(crtc); i++)
3170 I915_WRITE(PLANE_WM(pipe, i, level),
3171 new->plane[pipe][i][level]);
3172 I915_WRITE(CUR_WM(pipe, level),
3173 new->cursor[pipe][level]);
3175 for (i = 0; i < intel_num_planes(crtc); i++)
3176 I915_WRITE(PLANE_WM_TRANS(pipe, i),
3177 new->plane_trans[pipe][i]);
3178 I915_WRITE(CUR_WM_TRANS(pipe), new->cursor_trans[pipe]);
3180 for (i = 0; i < intel_num_planes(crtc); i++) {
3181 skl_ddb_entry_write(dev_priv,
3182 PLANE_BUF_CFG(pipe, i),
3183 &new->ddb.plane[pipe][i]);
3184 skl_ddb_entry_write(dev_priv,
3185 PLANE_NV12_BUF_CFG(pipe, i),
3186 &new->ddb.y_plane[pipe][i]);
3189 skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
3190 &new->ddb.cursor[pipe]);
3195 /* When setting up a new DDB allocation arrangement, we need to correctly
3196  * sequence the times at which the new allocations for the pipes are taken into
3197  * account or we'll have pipes fetching from space previously allocated to
3198  * another pipe.
3200  * Roughly the sequence looks like:
3201  *   1. re-allocate the pipe(s) with the allocation being reduced and not
3202  *      overlapping with a previously lit-up pipe (another way to put it is:
3203  *      pipes with their new allocation strictly included in their old ones).
3204  *   2. re-allocate the other pipes that get their allocation reduced
3205  *   3. allocate the pipes having their allocation increased
3207  * Steps 1. and 2. are here to take care of the following case:
3208  * - Initially DDB looks like this:
3209  *     | B | C |
3210  * - enable pipe A.
3211  * - pipe B has a reduced DDB allocation that overlaps with the old pipe C
3212  *   allocation
3213  *     | A | B | C |
3215  * We need to sequence the re-allocation: C, B, A (and not B, C, A). */
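/* Writing each plane's surface address back to itself re-arms the
 * double-buffered plane registers; the update is presumably latched at
 * the next vblank, like any other plane update, at which point the pipe
 * starts fetching from its new DDB allocation. */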
3218 static void
3219 skl_wm_flush_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, int pass)
3221 int plane;
3223 DRM_DEBUG_KMS("flush pipe %c (pass %d)\n", pipe_name(pipe), pass);
3225 for_each_plane(dev_priv, pipe, plane) {
3226 I915_WRITE(PLANE_SURF(pipe, plane),
3227 I915_READ(PLANE_SURF(pipe, plane)));
3229 I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
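/* A pipe's new allocation is "included" in its old one when it shrank and
 * both endpoints stay inside the old range; such pipes are safe to flush
 * in the first pass since they cannot fetch from space being handed to
 * another pipe. */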
3232 static bool
3233 skl_ddb_allocation_included(const struct skl_ddb_allocation *old,
3234 const struct skl_ddb_allocation *new,
3235 enum pipe pipe)
3237 uint16_t old_size, new_size;
3239 old_size = skl_ddb_entry_size(&old->pipe[pipe]);
3240 new_size = skl_ddb_entry_size(&new->pipe[pipe]);
3242 return old_size != new_size &&
3243 new->pipe[pipe].start >= old->pipe[pipe].start &&
3244 new->pipe[pipe].end <= old->pipe[pipe].end;
3247 static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
3248 struct skl_wm_values *new_values)
3250 struct drm_device *dev = dev_priv->dev;
3251 struct skl_ddb_allocation *cur_ddb, *new_ddb;
3252 bool reallocated[I915_MAX_PIPES] = {};
3253 struct intel_crtc *crtc;
3254 enum pipe pipe;
3256 new_ddb = &new_values->ddb;
3257 cur_ddb = &dev_priv->wm.skl_hw.ddb;
3260 /* First pass: flush the pipes with the new allocation contained within
3261  * the old space.
3263  * We'll wait for the vblank on those pipes to ensure we can safely
3264  * re-allocate the freed space without this pipe fetching from it. */
3266 for_each_intel_crtc(dev, crtc) {
3267 if (!crtc->active)
3268 continue;
3270 pipe = crtc->pipe;
3272 if (!skl_ddb_allocation_included(cur_ddb, new_ddb, pipe))
3273 continue;
3275 skl_wm_flush_pipe(dev_priv, pipe, 1);
3276 intel_wait_for_vblank(dev, pipe);
3278 reallocated[pipe] = true;
3283 /* Second pass: flush the pipes that are having their allocation
3284  * reduced, but overlapping with a previous allocation.
3286  * Here as well we need to wait for the vblank to make sure the freed
3287  * space is not used anymore. */
3289 for_each_intel_crtc(dev, crtc) {
3290 if (!crtc->active)
3291 continue;
3293 pipe = crtc->pipe;
3295 if (reallocated[pipe])
3296 continue;
3298 if (skl_ddb_entry_size(&new_ddb->pipe[pipe]) <
3299 skl_ddb_entry_size(&cur_ddb->pipe[pipe])) {
3300 skl_wm_flush_pipe(dev_priv, pipe, 2);
3301 intel_wait_for_vblank(dev, pipe);
3302 reallocated[pipe] = true;
3307 /* Third pass: flush the pipes that got more space allocated.
3309  * We don't need to actively wait for the update here, next vblank
3310  * will just get more DDB space with the correct WM values. */
3312 for_each_intel_crtc(dev, crtc) {
3313 if (!crtc->active)
3314 continue;
3316 pipe = crtc->pipe;
3319 /* At this point, only the pipes that got more space than before are
3320  * left to re-allocate. */
3322 if (reallocated[pipe])
3323 continue;
3325 skl_wm_flush_pipe(dev_priv, pipe, 3);
3329 static bool skl_update_pipe_wm(struct drm_crtc *crtc,
3330 struct skl_pipe_wm_parameters *params,
3331 struct intel_wm_config *config,
3332 struct skl_ddb_allocation *ddb, /* out */
3333 struct skl_pipe_wm *pipe_wm /* out */)
3335 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3337 skl_compute_wm_pipe_parameters(crtc, params);
3338 skl_allocate_pipe_ddb(crtc, config, params, ddb);
3339 skl_compute_pipe_wm(crtc, ddb, params, pipe_wm);
3341 if (!memcmp(&intel_crtc->wm.skl_active, pipe_wm, sizeof(*pipe_wm)))
3342 return false;
3344 intel_crtc->wm.skl_active = *pipe_wm;
3346 return true;
3349 static void skl_update_other_pipe_wm(struct drm_device *dev,
3350 struct drm_crtc *crtc,
3351 struct intel_wm_config *config,
3352 struct skl_wm_values *r)
3354 struct intel_crtc *intel_crtc;
3355 struct intel_crtc *this_crtc = to_intel_crtc(crtc);
3358 /* If the WM update hasn't changed the allocation for this_crtc (the
3359  * crtc we are currently computing the new WM values for), other
3360  * enabled crtcs will keep the same allocation and we don't need to
3361  * recompute anything for them. */
3363 if (!skl_ddb_allocation_changed(&r->ddb, this_crtc))
3364 return;
3367 /* Otherwise, because of this_crtc being freshly enabled/disabled, the
3368  * other active pipes need new DDB allocation and WM values. */
3370 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
3371 base.head) {
3372 struct skl_pipe_wm_parameters params = {};
3373 struct skl_pipe_wm pipe_wm = {};
3374 bool wm_changed;
3376 if (this_crtc->pipe == intel_crtc->pipe)
3377 continue;
3379 if (!intel_crtc->active)
3380 continue;
3382 wm_changed = skl_update_pipe_wm(&intel_crtc->base,
3383 &params, config,
3384 &r->ddb, &pipe_wm);
3387 /* If we end up re-computing the other pipe WM values, it's
3388  * because it was really needed, so we expect the WM values to
3389  * be different. */
3391 WARN_ON(!wm_changed);
3393 skl_compute_wm_results(dev, &params, &pipe_wm, r, intel_crtc);
3394 r->dirty[intel_crtc->pipe] = true;
3398 static void skl_update_wm(struct drm_crtc *crtc)
3400 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3401 struct drm_device *dev = crtc->dev;
3402 struct drm_i915_private *dev_priv = dev->dev_private;
3403 struct skl_pipe_wm_parameters params = {};
3404 struct skl_wm_values *results = &dev_priv->wm.skl_results;
3405 struct skl_pipe_wm pipe_wm = {};
3406 struct intel_wm_config config = {};
3408 memset(results, 0, sizeof(*results));
3410 skl_compute_wm_global_parameters(dev, &config);
3412 if (!skl_update_pipe_wm(crtc, &params, &config,
3413 &results->ddb, &pipe_wm))
3414 return;
3416 skl_compute_wm_results(dev, &params, &pipe_wm, results, intel_crtc);
3417 results->dirty[intel_crtc->pipe] = true;
3419 skl_update_other_pipe_wm(dev, crtc, &config, results);
3420 skl_write_wm_values(dev_priv, results);
3421 skl_flush_wm_values(dev_priv, results);
3423 /* store the new configuration */
3424 dev_priv->wm.skl_hw = *results;
3427 static void
3428 skl_update_sprite_wm(struct drm_plane *plane, struct drm_crtc *crtc,
3429 uint32_t sprite_width, uint32_t sprite_height,
3430 int pixel_size, bool enabled, bool scaled)
3432 struct intel_plane *intel_plane = to_intel_plane(plane);
3433 struct drm_framebuffer *fb = plane->state->fb;
3435 intel_plane->wm.enabled = enabled;
3436 intel_plane->wm.scaled = scaled;
3437 intel_plane->wm.horiz_pixels = sprite_width;
3438 intel_plane->wm.vert_pixels = sprite_height;
3439 intel_plane->wm.tiling = DRM_FORMAT_MOD_NONE;
3441 /* For planar: Bpp is for UV plane, y_Bpp is for Y plane */
3442 intel_plane->wm.bytes_per_pixel =
3443 (fb && fb->pixel_format == DRM_FORMAT_NV12) ?
3444 drm_format_plane_cpp(plane->state->fb->pixel_format, 1) : pixel_size;
3445 intel_plane->wm.y_bytes_per_pixel =
3446 (fb && fb->pixel_format == DRM_FORMAT_NV12) ?
3447 drm_format_plane_cpp(plane->state->fb->pixel_format, 0) : 0;
3450 /* Framebuffer can be NULL on plane disable, but it does not
3451  * matter for watermarks if we assume no tiling in that case. */
3453 if (fb)
3454 intel_plane->wm.tiling = fb->modifier[0];
3455 intel_plane->wm.rotation = plane->state->rotation;
3457 skl_update_wm(crtc);
3460 static void ilk_update_wm(struct drm_crtc *crtc)
3462 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3463 struct drm_device *dev = crtc->dev;
3464 struct drm_i915_private *dev_priv = dev->dev_private;
3465 struct ilk_wm_maximums max;
3466 struct ilk_pipe_wm_parameters params = {};
3467 struct ilk_wm_values results = {};
3468 enum intel_ddb_partitioning partitioning;
3469 struct intel_pipe_wm pipe_wm = {};
3470 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
3471 struct intel_wm_config config = {};
3473 ilk_compute_wm_parameters(crtc, &params);
3475 intel_compute_pipe_wm(crtc, &params, &pipe_wm);
3477 if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
3478 return;
3480 intel_crtc->wm.active = pipe_wm;
3482 ilk_compute_wm_config(dev, &config);
3484 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
3485 ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
3487 /* 5/6 split only in single pipe config on IVB+ */
3488 if (INTEL_INFO(dev)->gen >= 7 &&
3489 config.num_pipes_active == 1 && config.sprites_enabled) {
3490 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
3491 ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
3493 best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
3494 } else {
3495 best_lp_wm = &lp_wm_1_2;
3498 partitioning = (best_lp_wm == &lp_wm_1_2) ?
3499 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
3501 ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);
3503 ilk_write_wm_values(dev_priv, &results);
3506 static void
3507 ilk_update_sprite_wm(struct drm_plane *plane,
3508 struct drm_crtc *crtc,
3509 uint32_t sprite_width, uint32_t sprite_height,
3510 int pixel_size, bool enabled, bool scaled)
3512 struct drm_device *dev = plane->dev;
3513 struct intel_plane *intel_plane = to_intel_plane(plane);
3515 intel_plane->wm.enabled = enabled;
3516 intel_plane->wm.scaled = scaled;
3517 intel_plane->wm.horiz_pixels = sprite_width;
3518 intel_plane->wm.vert_pixels = sprite_height;
3519 intel_plane->wm.bytes_per_pixel = pixel_size;
3522 /* IVB workaround: must disable low power watermarks for at least
3523  * one frame before enabling scaling. LP watermarks can be re-enabled
3524  * when scaling is disabled.
3526  * WaCxSRDisabledForSpriteScaling:ivb */
3528 if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev))
3529 intel_wait_for_vblank(dev, intel_plane->pipe);
3531 ilk_update_wm(crtc);
3534 static void skl_pipe_wm_active_state(uint32_t val,
3535 struct skl_pipe_wm *active,
3536 bool is_transwm,
3537 bool is_cursor,
3538 int i,
3539 int level)
3541 bool is_enabled = (val & PLANE_WM_EN) != 0;
3543 if (!is_transwm) {
3544 if (!is_cursor) {
3545 active->wm[level].plane_en[i] = is_enabled;
3546 active->wm[level].plane_res_b[i] =
3547 val & PLANE_WM_BLOCKS_MASK;
3548 active->wm[level].plane_res_l[i] =
3549 (val >> PLANE_WM_LINES_SHIFT) &
3550 PLANE_WM_LINES_MASK;
3551 } else {
3552 active->wm[level].cursor_en = is_enabled;
3553 active->wm[level].cursor_res_b =
3554 val & PLANE_WM_BLOCKS_MASK;
3555 active->wm[level].cursor_res_l =
3556 (val >> PLANE_WM_LINES_SHIFT) &
3557 PLANE_WM_LINES_MASK;
3559 } else {
3560 if (!is_cursor) {
3561 active->trans_wm.plane_en[i] = is_enabled;
3562 active->trans_wm.plane_res_b[i] =
3563 val & PLANE_WM_BLOCKS_MASK;
3564 active->trans_wm.plane_res_l[i] =
3565 (val >> PLANE_WM_LINES_SHIFT) &
3566 PLANE_WM_LINES_MASK;
3567 } else {
3568 active->trans_wm.cursor_en = is_enabled;
3569 active->trans_wm.cursor_res_b =
3570 val & PLANE_WM_BLOCKS_MASK;
3571 active->trans_wm.cursor_res_l =
3572 (val >> PLANE_WM_LINES_SHIFT) &
3573 PLANE_WM_LINES_MASK;
3578 static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
3580 struct drm_device *dev = crtc->dev;
3581 struct drm_i915_private *dev_priv = dev->dev_private;
3582 struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
3583 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3584 struct skl_pipe_wm *active = &intel_crtc->wm.skl_active;
3585 enum pipe pipe = intel_crtc->pipe;
3586 int level, i, max_level;
3587 uint32_t temp;
3589 max_level = ilk_wm_max_level(dev);
3591 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
3593 for (level = 0; level <= max_level; level++) {
3594 for (i = 0; i < intel_num_planes(intel_crtc); i++)
3595 hw->plane[pipe][i][level] =
3596 I915_READ(PLANE_WM(pipe, i, level));
3597 hw->cursor[pipe][level] = I915_READ(CUR_WM(pipe, level));
3600 for (i = 0; i < intel_num_planes(intel_crtc); i++)
3601 hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i));
3602 hw->cursor_trans[pipe] = I915_READ(CUR_WM_TRANS(pipe));
3604 if (!intel_crtc->active)
3605 return;
3607 hw->dirty[pipe] = true;
3609 active->linetime = hw->wm_linetime[pipe];
3611 for (level = 0; level <= max_level; level++) {
3612 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
3613 temp = hw->plane[pipe][i][level];
3614 skl_pipe_wm_active_state(temp, active, false,
3615 false, i, level);
3617 temp = hw->cursor[pipe][level];
3618 skl_pipe_wm_active_state(temp, active, false, true, i, level);
3621 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
3622 temp = hw->plane_trans[pipe][i];
3623 skl_pipe_wm_active_state(temp, active, true, false, i, 0);
3626 temp = hw->cursor_trans[pipe];
3627 skl_pipe_wm_active_state(temp, active, true, true, i, 0);
3630 void skl_wm_get_hw_state(struct drm_device *dev)
3632 struct drm_i915_private *dev_priv = dev->dev_private;
3633 struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
3634 struct drm_crtc *crtc;
3636 skl_ddb_get_hw_state(dev_priv, ddb);
3637 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
3638 skl_pipe_wm_get_hw_state(crtc);
3641 static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
3643 struct drm_device *dev = crtc->dev;
3644 struct drm_i915_private *dev_priv = dev->dev_private;
3645 struct ilk_wm_values *hw = &dev_priv->wm.hw;
3646 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3647 struct intel_pipe_wm *active = &intel_crtc->wm.active;
3648 enum pipe pipe = intel_crtc->pipe;
3649 static const unsigned int wm0_pipe_reg[] = {
3650 [PIPE_A] = WM0_PIPEA_ILK,
3651 [PIPE_B] = WM0_PIPEB_ILK,
3652 [PIPE_C] = WM0_PIPEC_IVB,
3655 hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
3656 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
3657 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
3659 active->pipe_enabled = intel_crtc->active;
3661 if (active->pipe_enabled) {
3662 u32 tmp = hw->wm_pipe[pipe];
3665 /* For active pipes the LP0 watermark is marked as
3666  * enabled, and LP1+ watermarks as disabled, since
3667  * we can't really reverse compute them in case
3668  * multiple pipes are active. */
3670 active->wm[0].enable = true;
3671 active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
3672 active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
3673 active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
3674 active->linetime = hw->wm_linetime[pipe];
3675 } else {
3676 int level, max_level = ilk_wm_max_level(dev);
3679 /* For inactive pipes, all watermark levels
3680  * should be marked as enabled but zeroed,
3681  * which is what we'd compute them to. */
3683 for (level = 0; level <= max_level; level++)
3684 active->wm[level].enable = true;
3688 void ilk_wm_get_hw_state(struct drm_device *dev)
3690 struct drm_i915_private *dev_priv = dev->dev_private;
3691 struct ilk_wm_values *hw = &dev_priv->wm.hw;
3692 struct drm_crtc *crtc;
3694 for_each_crtc(dev, crtc)
3695 ilk_pipe_wm_get_hw_state(crtc);
3697 hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
3698 hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
3699 hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
3701 hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
3702 if (INTEL_INFO(dev)->gen >= 7) {
3703 hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
3704 hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
3707 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
3708 hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
3709 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
3710 else if (IS_IVYBRIDGE(dev))
3711 hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
3712 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
3714 hw->enable_fbc_wm =
3715 !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
3719 /** intel_update_watermarks - update FIFO watermark values based on current modes
3721  * Calculate watermark values for the various WM regs based on current mode
3722  * and plane configuration.
3724  * There are several cases to deal with here:
3725  *   - normal (i.e. non-self-refresh)
3726  *   - self-refresh (SR) mode
3727  *   - lines are large relative to FIFO size (buffer can hold up to 2)
3728  *   - lines are small relative to FIFO size (buffer can hold more than 2
3729  *     lines), so need to account for TLB latency
3731  * The normal calculation is:
3732  *   watermark = dotclock * bytes per pixel * latency
3733  * where latency is platform & configuration dependent (we assume pessimal
3734  * values here).
3736  * The SR calculation is:
3737  *   watermark = (trunc(latency/line time)+1) * surface width *
3738  *     bytes per pixel
3739  * where
3740  *   line time = htotal / dotclock
3741  *   surface width = hdisplay for normal plane and 64 for cursor
3742  * and latency is assumed to be high, as above.
3744  * The final value programmed to the register should always be rounded up,
3745  * and include an extra 2 entries to account for clock crossings.
3747  * We don't use the sprite, so we can ignore that. And on Crestline we have
3748  * to set the non-SR watermarks to 8. */
3750 void intel_update_watermarks(struct drm_crtc *crtc)
3752 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
3754 if (dev_priv->display.update_wm)
3755 dev_priv->display.update_wm(crtc);
3756 }
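/*
 * Illustrative sketch only, not a helper this file defines: the "normal"
 * watermark method from the comment above, with hypothetical names and
 * made-up numbers. For a 100000 kHz dot clock, 4 bytes per pixel and a
 * 10 us latency it computes 100000 * 4 * 10 / 1000 = 4000 bytes of FIFO,
 * to which a caller would still add the 2 extra entries for clock
 * crossings before programming the register.
 */
static inline unsigned long example_wm_method1(unsigned long clock_khz,
					       unsigned int bytes_per_pixel,
					       unsigned int latency_us)
{
	/* dotclock (kHz) * bytes per pixel * latency (us), scaled to bytes */
	return DIV_ROUND_UP(clock_khz * bytes_per_pixel * latency_us, 1000);
}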
3758 void intel_update_sprite_watermarks(struct drm_plane *plane,
3759 struct drm_crtc *crtc,
3760 uint32_t sprite_width,
3761 uint32_t sprite_height,
3762 int pixel_size,
3763 bool enabled, bool scaled)
3765 struct drm_i915_private *dev_priv = plane->dev->dev_private;
3767 if (dev_priv->display.update_sprite_wm)
3768 dev_priv->display.update_sprite_wm(plane, crtc,
3769 sprite_width, sprite_height,
3770 pixel_size, enabled, scaled);
3771 }
3773 /**
3774 * Lock protecting IPS related data structures
3775 */
3776 DEFINE_SPINLOCK(mchdev_lock);
3778 /* Global for IPS driver to get at the current i915 device. Protected by
3779 * mchdev_lock. */
3780 static struct drm_i915_private *i915_mch_dev;
3782 bool ironlake_set_drps(struct drm_device *dev, u8 val)
3784 struct drm_i915_private *dev_priv = dev->dev_private;
3785 u16 rgvswctl;
3787 assert_spin_locked(&mchdev_lock);
3789 rgvswctl = I915_READ16(MEMSWCTL);
3790 if (rgvswctl & MEMCTL_CMD_STS) {
3791 DRM_DEBUG("gpu busy, RCS change rejected\n");
3792 return false; /* still busy with another command */
3795 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
3796 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
3797 I915_WRITE16(MEMSWCTL, rgvswctl);
3798 POSTING_READ16(MEMSWCTL);
3800 rgvswctl |= MEMCTL_CMD_STS;
3801 I915_WRITE16(MEMSWCTL, rgvswctl);
3803 return true;
3806 static void ironlake_enable_drps(struct drm_device *dev)
3808 struct drm_i915_private *dev_priv = dev->dev_private;
3809 u32 rgvmodectl = I915_READ(MEMMODECTL);
3810 u8 fmax, fmin, fstart, vstart;
3812 spin_lock_irq(&mchdev_lock);
3814 /* Enable temp reporting */
3815 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
3816 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
3818 /* 100ms RC evaluation intervals */
3819 I915_WRITE(RCUPEI, 100000);
3820 I915_WRITE(RCDNEI, 100000);
3822 /* Set max/min thresholds to 90ms and 80ms respectively */
3823 I915_WRITE(RCBMAXAVG, 90000);
3824 I915_WRITE(RCBMINAVG, 80000);
3826 I915_WRITE(MEMIHYST, 1);
3828 /* Set up min, max, and cur for interrupt handling */
3829 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
3830 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
3831 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
3832 MEMMODE_FSTART_SHIFT;
3834 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
3835 PXVFREQ_PX_SHIFT;
3837 dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
3838 dev_priv->ips.fstart = fstart;
3840 dev_priv->ips.max_delay = fstart;
3841 dev_priv->ips.min_delay = fmin;
3842 dev_priv->ips.cur_delay = fstart;
3844 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
3845 fmax, fmin, fstart);
3847 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
3849 /*
3850 * Interrupts will be enabled in ironlake_irq_postinstall
3851 */
3853 I915_WRITE(VIDSTART, vstart);
3854 POSTING_READ(VIDSTART);
3856 rgvmodectl |= MEMMODE_SWMODE_EN;
3857 I915_WRITE(MEMMODECTL, rgvmodectl);
3859 if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
3860 DRM_ERROR("stuck trying to change perf mode\n");
3861 mdelay(1);
3863 ironlake_set_drps(dev, fstart);
3865 dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
3866 I915_READ(0x112e0);
3867 dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
3868 dev_priv->ips.last_count2 = I915_READ(0x112f4);
3869 dev_priv->ips.last_time2 = ktime_get_raw_ns();
3871 spin_unlock_irq(&mchdev_lock);
3874 static void ironlake_disable_drps(struct drm_device *dev)
3876 struct drm_i915_private *dev_priv = dev->dev_private;
3877 u16 rgvswctl;
3879 spin_lock_irq(&mchdev_lock);
3881 rgvswctl = I915_READ16(MEMSWCTL);
3883 /* Ack interrupts, disable EFC interrupt */
3884 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
3885 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
3886 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
3887 I915_WRITE(DEIIR, DE_PCU_EVENT);
3888 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
3890 /* Go back to the starting frequency */
3891 ironlake_set_drps(dev, dev_priv->ips.fstart);
3892 mdelay(1);
3893 rgvswctl |= MEMCTL_CMD_STS;
3894 I915_WRITE(MEMSWCTL, rgvswctl);
3895 mdelay(1);
3897 spin_unlock_irq(&mchdev_lock);
3900 /* There's a funny hw issue where the hw returns all 0 when reading from
3901 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
3902 * ourselves, instead of doing a rmw cycle (which might result in us clearing
3903 * all limits and the gpu stuck at whatever frequency it is at atm).
3904 */
3905 static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
3907 u32 limits;
3909 /* Only set the down limit when we've reached the lowest level to avoid
3910 * getting more interrupts, otherwise leave this clear. This prevents a
3911 * race in the hw when coming out of rc6: There's a tiny window where
3912 * the hw runs at the minimal clock before selecting the desired
3913 * frequency, if the down threshold expires in that window we will not
3914 * receive a down interrupt. */
3915 if (IS_GEN9(dev_priv->dev)) {
3916 limits = (dev_priv->rps.max_freq_softlimit) << 23;
3917 if (val <= dev_priv->rps.min_freq_softlimit)
3918 limits |= (dev_priv->rps.min_freq_softlimit) << 14;
3919 } else {
3920 limits = dev_priv->rps.max_freq_softlimit << 24;
3921 if (val <= dev_priv->rps.min_freq_softlimit)
3922 limits |= dev_priv->rps.min_freq_softlimit << 16;
3925 return limits;
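/*
 * Worked example (illustrative numbers): on pre-gen9, a max softlimit of
 * 0x10 with val still above the min softlimit yields limits = 0x10 << 24;
 * the down-limit bits stay clear, per the race note above.
 */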
3928 static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
3930 int new_power;
3931 u32 threshold_up = 0, threshold_down = 0; /* in % */
3932 u32 ei_up = 0, ei_down = 0;
3934 new_power = dev_priv->rps.power;
3935 switch (dev_priv->rps.power) {
3936 case LOW_POWER:
3937 if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
3938 new_power = BETWEEN;
3939 break;
3941 case BETWEEN:
3942 if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
3943 new_power = LOW_POWER;
3944 else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
3945 new_power = HIGH_POWER;
3946 break;
3948 case HIGH_POWER:
3949 if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
3950 new_power = BETWEEN;
3951 break;
3953 /* Max/min bins are special */
3954 if (val <= dev_priv->rps.min_freq_softlimit)
3955 new_power = LOW_POWER;
3956 if (val >= dev_priv->rps.max_freq_softlimit)
3957 new_power = HIGH_POWER;
3958 if (new_power == dev_priv->rps.power)
3959 return;
3961 /* Note the units here are not exactly 1us, but 1280ns. */
3962 switch (new_power) {
3963 case LOW_POWER:
3964 /* Upclock if more than 95% busy over 16ms */
3965 ei_up = 16000;
3966 threshold_up = 95;
3968 /* Downclock if less than 85% busy over 32ms */
3969 ei_down = 32000;
3970 threshold_down = 85;
3971 break;
3973 case BETWEEN:
3974 /* Upclock if more than 90% busy over 13ms */
3975 ei_up = 13000;
3976 threshold_up = 90;
3978 /* Downclock if less than 75% busy over 32ms */
3979 ei_down = 32000;
3980 threshold_down = 75;
3981 break;
3983 case HIGH_POWER:
3984 /* Upclock if more than 85% busy over 10ms */
3985 ei_up = 10000;
3986 threshold_up = 85;
3988 /* Downclock if less than 60% busy over 32ms */
3989 ei_down = 32000;
3990 threshold_down = 60;
3991 break;
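/*
 * Worked example (illustrative): in LOW_POWER the writes below program a
 * 16000 us up-evaluation interval with a 95% busy threshold, i.e. the
 * threshold register gets GT_INTERVAL_FROM_US(dev_priv, 16000 * 95 / 100)
 * = GT_INTERVAL_FROM_US(dev_priv, 15200).
 */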
3994 I915_WRITE(GEN6_RP_UP_EI,
3995 GT_INTERVAL_FROM_US(dev_priv, ei_up));
3996 I915_WRITE(GEN6_RP_UP_THRESHOLD,
3997 GT_INTERVAL_FROM_US(dev_priv, (ei_up * threshold_up / 100)));
3999 I915_WRITE(GEN6_RP_DOWN_EI,
4000 GT_INTERVAL_FROM_US(dev_priv, ei_down));
4001 I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
4002 GT_INTERVAL_FROM_US(dev_priv, (ei_down * threshold_down / 100)));
4004 I915_WRITE(GEN6_RP_CONTROL,
4005 GEN6_RP_MEDIA_TURBO |
4006 GEN6_RP_MEDIA_HW_NORMAL_MODE |
4007 GEN6_RP_MEDIA_IS_GFX |
4008 GEN6_RP_ENABLE |
4009 GEN6_RP_UP_BUSY_AVG |
4010 GEN6_RP_DOWN_IDLE_AVG);
4012 dev_priv->rps.power = new_power;
4013 dev_priv->rps.up_threshold = threshold_up;
4014 dev_priv->rps.down_threshold = threshold_down;
4015 dev_priv->rps.last_adj = 0;
4018 static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
4020 u32 mask = 0;
4022 if (val > dev_priv->rps.min_freq_softlimit)
4023 mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
4024 if (val < dev_priv->rps.max_freq_softlimit)
4025 mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
4027 mask &= dev_priv->pm_rps_events;
4029 return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
4032 /* gen6_set_rps is called to update the frequency request, but should also be
4033 * called when the range (min_delay and max_delay) is modified so that we can
4034 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
4035 static void gen6_set_rps(struct drm_device *dev, u8 val)
4037 struct drm_i915_private *dev_priv = dev->dev_private;
4039 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4040 WARN_ON(val > dev_priv->rps.max_freq);
4041 WARN_ON(val < dev_priv->rps.min_freq);
4043 /* min/max delay may still have been modified so be sure to
4044 * write the limits value.
4045 */
4046 if (val != dev_priv->rps.cur_freq) {
4047 gen6_set_rps_thresholds(dev_priv, val);
4049 if (IS_GEN9(dev))
4050 I915_WRITE(GEN6_RPNSWREQ,
4051 GEN9_FREQUENCY(val));
4052 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
4053 I915_WRITE(GEN6_RPNSWREQ,
4054 HSW_FREQUENCY(val));
4055 else
4056 I915_WRITE(GEN6_RPNSWREQ,
4057 GEN6_FREQUENCY(val) |
4058 GEN6_OFFSET(0) |
4059 GEN6_AGGRESSIVE_TURBO);
4062 /* Make sure we continue to get interrupts
4063 * until we hit the minimum or maximum frequencies.
4064 */
4065 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
4066 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
4068 POSTING_READ(GEN6_RPNSWREQ);
4070 dev_priv->rps.cur_freq = val;
4071 trace_intel_gpu_freq_change(val * 50);
4074 static void valleyview_set_rps(struct drm_device *dev, u8 val)
4076 struct drm_i915_private *dev_priv = dev->dev_private;
4078 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4079 WARN_ON(val > dev_priv->rps.max_freq);
4080 WARN_ON(val < dev_priv->rps.min_freq);
4082 if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
4083 "Odd GPU freq value\n"))
4084 val &= ~1;
4086 if (val != dev_priv->rps.cur_freq) {
4087 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
4088 if (!IS_CHERRYVIEW(dev_priv))
4089 gen6_set_rps_thresholds(dev_priv, val);
4092 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
4094 dev_priv->rps.cur_freq = val;
4095 trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
4098 /* vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down
4100 * * If Gfx is Idle, then
4101 * 1. Forcewake Media well.
4102 * 2. Request idle freq.
4103 * 3. Release Forcewake of Media well.
4104 */
4105 static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
4107 u32 val = dev_priv->rps.idle_freq;
4109 if (dev_priv->rps.cur_freq <= val)
4110 return;
4112 /* Wake up the media well, as that takes a lot less
4113 * power than the Render well. */
4114 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
4115 valleyview_set_rps(dev_priv->dev, val);
4116 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
4119 void gen6_rps_busy(struct drm_i915_private *dev_priv)
4121 mutex_lock(&dev_priv->rps.hw_lock);
4122 if (dev_priv->rps.enabled) {
4123 if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED))
4124 gen6_rps_reset_ei(dev_priv);
4125 I915_WRITE(GEN6_PMINTRMSK,
4126 gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
4128 mutex_unlock(&dev_priv->rps.hw_lock);
4131 void gen6_rps_idle(struct drm_i915_private *dev_priv)
4133 struct drm_device *dev = dev_priv->dev;
4135 mutex_lock(&dev_priv->rps.hw_lock);
4136 if (dev_priv->rps.enabled) {
4137 if (IS_VALLEYVIEW(dev))
4138 vlv_set_rps_idle(dev_priv);
4139 else
4140 gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
4141 dev_priv->rps.last_adj = 0;
4142 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
4144 mutex_unlock(&dev_priv->rps.hw_lock);
4146 spin_lock(&dev_priv->rps.client_lock);
4147 while (!list_empty(&dev_priv->rps.clients))
4148 list_del_init(dev_priv->rps.clients.next);
4149 spin_unlock(&dev_priv->rps.client_lock);
4152 void gen6_rps_boost(struct drm_i915_private *dev_priv,
4153 struct intel_rps_client *rps,
4154 unsigned long submitted)
4156 /* This is intentionally racy! We peek at the state here, then
4157 * validate inside the RPS worker.
4158 */
4159 if (!(dev_priv->mm.busy &&
4160 dev_priv->rps.enabled &&
4161 dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit))
4162 return;
4164 /* Force an RPS boost (and don't count it against the client) if
4165 * the GPU is severely congested.
4166 */
4167 if (rps && time_after(jiffies, submitted + DRM_I915_THROTTLE_JIFFIES))
4168 rps = NULL;
4170 spin_lock(&dev_priv->rps.client_lock);
4171 if (rps == NULL || list_empty(&rps->link)) {
4172 spin_lock_irq(&dev_priv->irq_lock);
4173 if (dev_priv->rps.interrupts_enabled) {
4174 dev_priv->rps.client_boost = true;
4175 queue_work(dev_priv->wq, &dev_priv->rps.work);
4177 spin_unlock_irq(&dev_priv->irq_lock);
4179 if (rps != NULL) {
4180 list_add(&rps->link, &dev_priv->rps.clients);
4181 rps->boosts++;
4182 } else
4183 dev_priv->rps.boosts++;
4185 spin_unlock(&dev_priv->rps.client_lock);
4188 void intel_set_rps(struct drm_device *dev, u8 val)
4190 if (IS_VALLEYVIEW(dev))
4191 valleyview_set_rps(dev, val);
4192 else
4193 gen6_set_rps(dev, val);
4196 static void gen9_disable_rps(struct drm_device *dev)
4198 struct drm_i915_private *dev_priv = dev->dev_private;
4200 I915_WRITE(GEN6_RC_CONTROL, 0);
4201 I915_WRITE(GEN9_PG_ENABLE, 0);
4204 static void gen6_disable_rps(struct drm_device *dev)
4206 struct drm_i915_private *dev_priv = dev->dev_private;
4208 I915_WRITE(GEN6_RC_CONTROL, 0);
4209 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
4212 static void cherryview_disable_rps(struct drm_device *dev)
4214 struct drm_i915_private *dev_priv = dev->dev_private;
4216 I915_WRITE(GEN6_RC_CONTROL, 0);
4219 static void valleyview_disable_rps(struct drm_device *dev)
4221 struct drm_i915_private *dev_priv = dev->dev_private;
4223 /* We do forcewake before disabling RC6; this is
4224 * what the BIOS expects when going into suspend. */
4225 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4227 I915_WRITE(GEN6_RC_CONTROL, 0);
4229 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4232 static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
4234 if (IS_VALLEYVIEW(dev)) {
4235 if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
4236 mode = GEN6_RC_CTL_RC6_ENABLE;
4237 else
4238 mode = 0;
4240 if (HAS_RC6p(dev))
4241 DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n",
4242 (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
4243 (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
4244 (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
4246 else
4247 DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n",
4248 (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off");
4251 static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
4253 /* No RC6 before Ironlake */
4254 if (INTEL_INFO(dev)->gen < 5)
4255 return 0;
4258 /* RC6 is only on Ironlake mobile, not on desktop */
4258 if (INTEL_INFO(dev)->gen == 5 && !IS_IRONLAKE_M(dev))
4259 return 0;
4261 /* Respect the kernel parameter if it is set */
4262 if (enable_rc6 >= 0) {
4263 int mask;
4265 if (HAS_RC6p(dev))
4266 mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
4267 INTEL_RC6pp_ENABLE;
4268 else
4269 mask = INTEL_RC6_ENABLE;
4271 if ((enable_rc6 & mask) != enable_rc6)
4272 DRM_DEBUG_KMS("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
4273 enable_rc6 & mask, enable_rc6, mask);
4275 return enable_rc6 & mask;
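/*
 * Worked example (illustrative): i915.enable_rc6=7 on hardware without
 * RC6p gives mask = INTEL_RC6_ENABLE, so 7 & 1 = 1 and only plain RC6
 * survives the sanitizing.
 */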
4278 /* Disable RC6 on Ironlake */
4279 if (INTEL_INFO(dev)->gen == 5)
4280 return 0;
4282 if (IS_IVYBRIDGE(dev))
4283 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
4285 return INTEL_RC6_ENABLE;
4288 int intel_enable_rc6(const struct drm_device *dev)
4290 return i915.enable_rc6;
4293 static void gen6_init_rps_frequencies(struct drm_device *dev)
4295 struct drm_i915_private *dev_priv = dev->dev_private;
4296 uint32_t rp_state_cap;
4297 u32 ddcc_status = 0;
4298 int ret;
4300 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
4301 /* All of these values are in units of 50MHz */
4302 dev_priv->rps.cur_freq = 0;
4303 /* static values from HW: RP0 > RP1 > RPn (min_freq) */
4304 dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
4305 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
4306 dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
4307 if (IS_SKYLAKE(dev)) {
4308 /* Store the frequency values in 16.66 MHz units, which is
4309 the natural hardware unit for SKL */
4310 dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
4311 dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
4312 dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
4314 /* hw_max = RP0 until we check for overclocking */
4315 dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
4317 dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
4318 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
4319 ret = sandybridge_pcode_read(dev_priv,
4320 HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
4321 &ddcc_status);
4322 if (ret == 0)
4323 dev_priv->rps.efficient_freq =
4324 clamp_t(u8,
4325 ((ddcc_status >> 8) & 0xff),
4326 dev_priv->rps.min_freq,
4327 dev_priv->rps.max_freq);
4330 dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
4332 /* Preserve min/max settings in case of re-init */
4333 if (dev_priv->rps.max_freq_softlimit == 0)
4334 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
4336 if (dev_priv->rps.min_freq_softlimit == 0) {
4337 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
4338 dev_priv->rps.min_freq_softlimit =
4339 max_t(int, dev_priv->rps.efficient_freq,
4340 intel_freq_opcode(dev_priv, 450));
4341 else
4342 dev_priv->rps.min_freq_softlimit =
4343 dev_priv->rps.min_freq;
4347 /* See the Gen9_GT_PM_Programming_Guide doc for the below */
4348 static void gen9_enable_rps(struct drm_device *dev)
4350 struct drm_i915_private *dev_priv = dev->dev_private;
4352 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4354 gen6_init_rps_frequencies(dev);
4356 /* Program defaults and thresholds for RPS */
4357 I915_WRITE(GEN6_RC_VIDEO_FREQ,
4358 GEN9_FREQUENCY(dev_priv->rps.rp1_freq));
4360 /* 1 second timeout */
4361 I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
4362 GT_INTERVAL_FROM_US(dev_priv, 1000000));
4364 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa);
4366 /* Leaning on the below call to gen6_set_rps to program/setup the
4367 * Up/Down EI & threshold registers, as well as the RP_CONTROL,
4368 * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
4369 dev_priv->rps.power = HIGH_POWER; /* force a reset */
4370 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
4372 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4375 static void gen9_enable_rc6(struct drm_device *dev)
4377 struct drm_i915_private *dev_priv = dev->dev_private;
4378 struct intel_engine_cs *ring;
4379 uint32_t rc6_mask = 0;
4380 int unused;
4382 /* 1a: Software RC state - RC0 */
4383 I915_WRITE(GEN6_RC_STATE, 0);
4385 /* 1b: Get forcewake during program sequence. Although the driver
4386 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
4387 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4389 /* 2a: Disable RC states. */
4390 I915_WRITE(GEN6_RC_CONTROL, 0);
4392 /* 2b: Program RC6 thresholds.*/
4393 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
4394 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
4395 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
4396 for_each_ring(ring, dev_priv, unused)
4397 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
4398 I915_WRITE(GEN6_RC_SLEEP, 0);
4399 I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
4401 /* 2c: Program Coarse Power Gating Policies. */
4402 I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 25);
4403 I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);
4405 /* 3a: Enable RC6 */
4406 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
4407 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
4408 DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
4409 "on" : "off");
4410 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
4411 GEN6_RC_CTL_EI_MODE(1) |
4412 rc6_mask);
4415 * 3b: Enable Coarse Power Gating only when RC6 is enabled.
4416 * WaDisableRenderPowerGating:skl,bxt - Render PG needs to be disabled with RC6.
4418 I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
4419 GEN9_MEDIA_PG_ENABLE : 0);
4422 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4426 static void gen8_enable_rps(struct drm_device *dev)
4428 struct drm_i915_private *dev_priv = dev->dev_private;
4429 struct intel_engine_cs *ring;
4430 uint32_t rc6_mask = 0;
4431 int unused;
4433 /* 1a: Software RC state - RC0 */
4434 I915_WRITE(GEN6_RC_STATE, 0);
4436 /* 1c & 1d: Get forcewake during program sequence. Although the driver
4437 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
4438 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4440 /* 2a: Disable RC states. */
4441 I915_WRITE(GEN6_RC_CONTROL, 0);
4443 /* Initialize rps frequencies */
4444 gen6_init_rps_frequencies(dev);
4446 /* 2b: Program RC6 thresholds.*/
4447 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
4448 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
4449 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
4450 for_each_ring(ring, dev_priv, unused)
4451 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
4452 I915_WRITE(GEN6_RC_SLEEP, 0);
4453 if (IS_BROADWELL(dev))
4454 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
4455 else
4456 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
4458 /* 3: Enable RC6 */
4459 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
4460 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
4461 intel_print_rc6_info(dev, rc6_mask);
4462 if (IS_BROADWELL(dev))
4463 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
4464 GEN7_RC_CTL_TO_MODE |
4465 rc6_mask);
4466 else
4467 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
4468 GEN6_RC_CTL_EI_MODE(1) |
4469 rc6_mask);
4471 /* 4: Program defaults and thresholds for RPS */
4472 I915_WRITE(GEN6_RPNSWREQ,
4473 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
4474 I915_WRITE(GEN6_RC_VIDEO_FREQ,
4475 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
4476 /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
4477 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
4479 /* Docs recommend 900 MHz and 300 MHz, respectively */
4480 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
4481 dev_priv->rps.max_freq_softlimit << 24 |
4482 dev_priv->rps.min_freq_softlimit << 16);
4484 I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
4485 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
4486 I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
4487 I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
4489 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
4491 /* 5: Enable RPS */
4492 I915_WRITE(GEN6_RP_CONTROL,
4493 GEN6_RP_MEDIA_TURBO |
4494 GEN6_RP_MEDIA_HW_NORMAL_MODE |
4495 GEN6_RP_MEDIA_IS_GFX |
4496 GEN6_RP_ENABLE |
4497 GEN6_RP_UP_BUSY_AVG |
4498 GEN6_RP_DOWN_IDLE_AVG);
4500 /* 6: Ring frequency + overclocking (our driver does this later) */
4502 dev_priv->rps.power = HIGH_POWER; /* force a reset */
4503 gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
4505 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4508 static void gen6_enable_rps(struct drm_device *dev)
4510 struct drm_i915_private *dev_priv = dev->dev_private;
4511 struct intel_engine_cs *ring;
4512 u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
4513 u32 gtfifodbg;
4514 int rc6_mode;
4515 int i, ret;
4517 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4519 /* Here begins a magic sequence of register writes to enable
4520 * auto-downclocking.
4521 *
4522 * Perhaps there might be some value in exposing these to
4523 * userspace...
4524 */
4525 I915_WRITE(GEN6_RC_STATE, 0);
4527 /* Clear the DBG now so we don't confuse earlier errors */
4528 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
4529 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
4530 I915_WRITE(GTFIFODBG, gtfifodbg);
4533 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4535 /* Initialize rps frequencies */
4536 gen6_init_rps_frequencies(dev);
4538 /* disable the counters and set deterministic thresholds */
4539 I915_WRITE(GEN6_RC_CONTROL, 0);
4541 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
4542 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
4543 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
4544 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
4545 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
4547 for_each_ring(ring, dev_priv, i)
4548 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
4550 I915_WRITE(GEN6_RC_SLEEP, 0);
4551 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
4552 if (IS_IVYBRIDGE(dev))
4553 I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
4554 else
4555 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
4556 I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
4557 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
4559 /* Check if we are enabling RC6 */
4560 rc6_mode = intel_enable_rc6(dev_priv->dev);
4561 if (rc6_mode & INTEL_RC6_ENABLE)
4562 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
4564 /* We don't use those on Haswell */
4565 if (!IS_HASWELL(dev)) {
4566 if (rc6_mode & INTEL_RC6p_ENABLE)
4567 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
4569 if (rc6_mode & INTEL_RC6pp_ENABLE)
4570 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
4573 intel_print_rc6_info(dev, rc6_mask);
4575 I915_WRITE(GEN6_RC_CONTROL,
4576 rc6_mask |
4577 GEN6_RC_CTL_EI_MODE(1) |
4578 GEN6_RC_CTL_HW_ENABLE);
4580 /* Power down if completely idle for over 50ms */
4581 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
4582 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
4584 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
4585 if (ret)
4586 DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
4588 ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
4589 if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
4590 DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
4591 (dev_priv->rps.max_freq_softlimit & 0xff) * 50,
4592 (pcu_mbox & 0xff) * 50);
4593 dev_priv->rps.max_freq = pcu_mbox & 0xff;
4596 dev_priv->rps.power = HIGH_POWER; /* force a reset */
4597 gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
4599 rc6vids = 0;
4600 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
4601 if (IS_GEN6(dev) && ret) {
4602 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
4603 } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
4604 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
4605 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
4606 rc6vids &= 0xffff00;
4607 rc6vids |= GEN6_ENCODE_RC6_VID(450);
4608 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
4609 if (ret)
4610 DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
4613 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4616 static void __gen6_update_ring_freq(struct drm_device *dev)
4618 struct drm_i915_private *dev_priv = dev->dev_private;
4619 int min_freq = 15;
4620 unsigned int gpu_freq;
4621 unsigned int max_ia_freq, min_ring_freq;
4622 int scaling_factor = 180;
4623 struct cpufreq_policy *policy;
4625 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4627 policy = cpufreq_cpu_get(0);
4628 if (policy) {
4629 max_ia_freq = policy->cpuinfo.max_freq;
4630 cpufreq_cpu_put(policy);
4631 } else {
4632 /*
4633 * Default to measured freq if none found, PCU will ensure we
4634 * don't go over
4635 */
4636 max_ia_freq = tsc_khz;
4639 /* Convert from kHz to MHz */
4640 max_ia_freq /= 1000;
4642 min_ring_freq = I915_READ(DCLK) & 0xf;
4643 /* convert DDR frequency from units of 266.6MHz to bandwidth */
4644 min_ring_freq = mult_frac(min_ring_freq, 8, 3);
4646 /*
4647 * For each potential GPU frequency, load a ring frequency we'd like
4648 * to use for memory access. We do this by specifying the IA frequency
4649 * the PCU should use as a reference to determine the ring frequency.
4650 */
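/*
 * Worked example (illustrative numbers): on Haswell a gpu_freq of 20
 * (20 * 50 MHz = 1000 MHz) requests mult_frac(20, 5, 4) = 25 ring units,
 * raised to min_ring_freq if the DDR fuse demands more.
 */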
4651 for (gpu_freq = dev_priv->rps.max_freq; gpu_freq >= dev_priv->rps.min_freq;
4652 gpu_freq--) {
4653 int diff = dev_priv->rps.max_freq - gpu_freq;
4654 unsigned int ia_freq = 0, ring_freq = 0;
4656 if (INTEL_INFO(dev)->gen >= 8) {
4657 /* max(2 * GT, DDR). NB: GT is 50MHz units */
4658 ring_freq = max(min_ring_freq, gpu_freq);
4659 } else if (IS_HASWELL(dev)) {
4660 ring_freq = mult_frac(gpu_freq, 5, 4);
4661 ring_freq = max(min_ring_freq, ring_freq);
4662 /* leave ia_freq as the default, chosen by cpufreq */
4663 } else {
4664 /* On older processors, there is no separate ring
4665 * clock domain, so in order to boost the bandwidth
4666 * of the ring, we need to upclock the CPU (ia_freq).
4667 *
4668 * For GPU frequencies less than 750MHz,
4669 * just use the lowest ring freq.
4670 */
4671 if (gpu_freq < min_freq)
4672 ia_freq = 800;
4673 else
4674 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
4675 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
4678 sandybridge_pcode_write(dev_priv,
4679 GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
4680 ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
4681 ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
4682 gpu_freq);
4686 void gen6_update_ring_freq(struct drm_device *dev)
4688 struct drm_i915_private *dev_priv = dev->dev_private;
4690 if (INTEL_INFO(dev)->gen < 6 || IS_VALLEYVIEW(dev))
4691 return;
4693 mutex_lock(&dev_priv->rps.hw_lock);
4694 __gen6_update_ring_freq(dev);
4695 mutex_unlock(&dev_priv->rps.hw_lock);
4698 static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
4700 struct drm_device *dev = dev_priv->dev;
4701 u32 val, rp0;
4703 if (dev->pdev->revision >= 0x20) {
4704 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
4706 switch (INTEL_INFO(dev)->eu_total) {
4707 case 8:
4708 /* (2 * 4) config */
4709 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
4710 break;
4711 case 12:
4712 /* (2 * 6) config */
4713 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT);
4714 break;
4715 case 16:
4716 /* (2 * 8) config */
4717 default:
4718 /* Setting (2 * 8) Min RP0 for any other combination */
4719 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT);
4720 break;
4722 rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);
4723 } else {
4724 /* For pre-production hardware */
4725 val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
4726 rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) &
4727 PUNIT_GPU_STATUS_MAX_FREQ_MASK;
4729 return rp0;
4732 static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
4734 u32 val, rpe;
4736 val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
4737 rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
4739 return rpe;
4742 static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
4744 struct drm_device *dev = dev_priv->dev;
4745 u32 val, rp1;
4747 if (dev->pdev->revision >= 0x20) {
4748 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
4749 rp1 = (val & FB_GFX_FREQ_FUSE_MASK);
4750 } else {
4751 /* For pre-production hardware */
4752 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
4753 rp1 = ((val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) &
4754 PUNIT_GPU_STATUS_MAX_FREQ_MASK);
4756 return rp1;
4759 static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
4761 u32 val, rp1;
4763 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
4765 rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
4767 return rp1;
4770 static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
4772 u32 val, rp0;
4774 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
4776 rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
4777 /* Clamp to max */
4778 rp0 = min_t(u32, rp0, 0xea);
4780 return rp0;
4783 static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
4785 u32 val, rpe;
4787 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
4788 rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
4789 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
4790 rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
4792 return rpe;
4795 static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
4797 return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
4800 /* Check that the pctx buffer wasn't moved under us. */
4801 static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
4803 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
4805 WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
4806 dev_priv->vlv_pctx->stolen->start);
4810 /* Check that the pcbr address is not zero. */
4811 static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
4813 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
4815 WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
4818 static void cherryview_setup_pctx(struct drm_device *dev)
4820 struct drm_i915_private *dev_priv = dev->dev_private;
4821 unsigned long pctx_paddr, paddr;
4822 struct i915_gtt *gtt = &dev_priv->gtt;
4823 u32 pcbr;
4824 int pctx_size = 32*1024;
4826 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
4828 pcbr = I915_READ(VLV_PCBR);
4829 if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
4830 DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
4831 paddr = (dev_priv->mm.stolen_base +
4832 (gtt->stolen_size - pctx_size));
4834 pctx_paddr = (paddr & (~4095));
4835 I915_WRITE(VLV_PCBR, pctx_paddr);
4838 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
4841 static void valleyview_setup_pctx(struct drm_device *dev)
4843 struct drm_i915_private *dev_priv = dev->dev_private;
4844 struct drm_i915_gem_object *pctx;
4845 unsigned long pctx_paddr;
4846 u32 pcbr;
4847 int pctx_size = 24*1024;
4849 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
4851 pcbr = I915_READ(VLV_PCBR);
4852 if (pcbr) {
4853 /* BIOS set it up already, grab the pre-alloc'd space */
4854 int pcbr_offset;
4856 pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
4857 pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
4858 pcbr_offset,
4859 I915_GTT_OFFSET_NONE,
4860 pctx_size);
4861 goto out;
4864 DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
4866 /*
4867 * From the Gunit register HAS:
4868 * The Gfx driver is expected to program this register and ensure
4869 * proper allocation within Gfx stolen memory. For example, this
4870 * register should be programmed such that the PCBR range does not
4871 * overlap with other ranges, such as the frame buffer, protected
4872 * memory, or any other relevant ranges.
4873 */
4874 pctx = i915_gem_object_create_stolen(dev, pctx_size);
4875 if (!pctx) {
4876 DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
4877 return;
4880 pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
4881 I915_WRITE(VLV_PCBR, pctx_paddr);
4883 out:
4884 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
4885 dev_priv->vlv_pctx = pctx;
4888 static void valleyview_cleanup_pctx(struct drm_device *dev)
4890 struct drm_i915_private *dev_priv = dev->dev_private;
4892 if (WARN_ON(!dev_priv->vlv_pctx))
4893 return;
4895 drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
4896 dev_priv->vlv_pctx = NULL;
4899 static void valleyview_init_gt_powersave(struct drm_device *dev)
4901 struct drm_i915_private *dev_priv = dev->dev_private;
4902 u32 val;
4904 valleyview_setup_pctx(dev);
4906 mutex_lock(&dev_priv->rps.hw_lock);
4908 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
4909 switch ((val >> 6) & 3) {
4910 case 0:
4911 case 1:
4912 dev_priv->mem_freq = 800;
4913 break;
4914 case 2:
4915 dev_priv->mem_freq = 1066;
4916 break;
4917 case 3:
4918 dev_priv->mem_freq = 1333;
4919 break;
4921 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
4923 dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
4924 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
4925 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
4926 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
4927 dev_priv->rps.max_freq);
4929 dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
4930 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
4931 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
4932 dev_priv->rps.efficient_freq);
4934 dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
4935 DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
4936 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
4937 dev_priv->rps.rp1_freq);
4939 dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
4940 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
4941 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
4942 dev_priv->rps.min_freq);
4944 dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
4946 /* Preserve min/max settings in case of re-init */
4947 if (dev_priv->rps.max_freq_softlimit == 0)
4948 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
4950 if (dev_priv->rps.min_freq_softlimit == 0)
4951 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
4953 mutex_unlock(&dev_priv->rps.hw_lock);
4956 static void cherryview_init_gt_powersave(struct drm_device *dev)
4958 struct drm_i915_private *dev_priv = dev->dev_private;
4959 u32 val;
4961 cherryview_setup_pctx(dev);
4963 mutex_lock(&dev_priv->rps.hw_lock);
4965 mutex_lock(&dev_priv->sb_lock);
4966 val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
4967 mutex_unlock(&dev_priv->sb_lock);
4969 switch ((val >> 2) & 0x7) {
4970 case 0:
4971 case 1:
4972 dev_priv->rps.cz_freq = 200;
4973 dev_priv->mem_freq = 1600;
4974 break;
4975 case 2:
4976 dev_priv->rps.cz_freq = 267;
4977 dev_priv->mem_freq = 1600;
4978 break;
4979 case 3:
4980 dev_priv->rps.cz_freq = 333;
4981 dev_priv->mem_freq = 2000;
4982 break;
4983 case 4:
4984 dev_priv->rps.cz_freq = 320;
4985 dev_priv->mem_freq = 1600;
4986 break;
4987 case 5:
4988 dev_priv->rps.cz_freq = 400;
4989 dev_priv->mem_freq = 1600;
4990 break;
4992 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
4994 dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
4995 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
4996 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
4997 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
4998 dev_priv->rps.max_freq);
5000 dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
5001 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
5002 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
5003 dev_priv->rps.efficient_freq);
5005 dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
5006 DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
5007 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
5008 dev_priv->rps.rp1_freq);
5010 /* PUnit validated range is only [RPe, RP0] */
5011 dev_priv->rps.min_freq = dev_priv->rps.efficient_freq;
5012 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
5013 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
5014 dev_priv->rps.min_freq);
5016 WARN_ONCE((dev_priv->rps.max_freq |
5017 dev_priv->rps.efficient_freq |
5018 dev_priv->rps.rp1_freq |
5019 dev_priv->rps.min_freq) & 1,
5020 "Odd GPU freq values\n");
5022 dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
5024 /* Preserve min/max settings in case of re-init */
5025 if (dev_priv->rps.max_freq_softlimit == 0)
5026 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
5028 if (dev_priv->rps.min_freq_softlimit == 0)
5029 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
5031 mutex_unlock(&dev_priv->rps.hw_lock);
5034 static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
5036 valleyview_cleanup_pctx(dev);
5039 static void cherryview_enable_rps(struct drm_device *dev)
5041 struct drm_i915_private *dev_priv = dev->dev_private;
5042 struct intel_engine_cs *ring;
5043 u32 gtfifodbg, val, rc6_mode = 0, pcbr;
5044 int i;
5046 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
5048 gtfifodbg = I915_READ(GTFIFODBG);
5049 if (gtfifodbg) {
5050 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
5051 gtfifodbg);
5052 I915_WRITE(GTFIFODBG, gtfifodbg);
5055 cherryview_check_pctx(dev_priv);
5057 /* 1a & 1b: Get forcewake during program sequence. Although the driver
5058 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
5059 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5061 /* Disable RC states. */
5062 I915_WRITE(GEN6_RC_CONTROL, 0);
5064 /* 2a: Program RC6 thresholds.*/
5065 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
5066 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
5067 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
5069 for_each_ring(ring, dev_priv, i)
5070 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
5071 I915_WRITE(GEN6_RC_SLEEP, 0);
5073 /* TO threshold set to 500 us (0x186 * 1.28 us) */
5074 I915_WRITE(GEN6_RC6_THRESHOLD, 0x186);
5076 /* allows RC6 residency counter to work */
5077 I915_WRITE(VLV_COUNTER_CONTROL,
5078 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
5079 VLV_MEDIA_RC6_COUNT_EN |
5080 VLV_RENDER_RC6_COUNT_EN));
5082 /* For now we assume BIOS is allocating and populating the PCBR */
5083 pcbr = I915_READ(VLV_PCBR);
5085 /* 3: Enable RC6 */
5086 if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
5087 (pcbr >> VLV_PCBR_ADDR_SHIFT))
5088 rc6_mode = GEN7_RC_CTL_TO_MODE;
5090 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
5092 /* 4: Program defaults and thresholds for RPS */
5093 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
5094 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
5095 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
5096 I915_WRITE(GEN6_RP_UP_EI, 66000);
5097 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
5099 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
5101 /* 5: Enable RPS */
5102 I915_WRITE(GEN6_RP_CONTROL,
5103 GEN6_RP_MEDIA_HW_NORMAL_MODE |
5104 GEN6_RP_MEDIA_IS_GFX |
5105 GEN6_RP_ENABLE |
5106 GEN6_RP_UP_BUSY_AVG |
5107 GEN6_RP_DOWN_IDLE_AVG);
5109 /* Setting Fixed Bias */
5110 val = VLV_OVERRIDE_EN |
5111 VLV_SOC_TDP_EN |
5112 CHV_BIAS_CPU_50_SOC_50;
5113 vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
5115 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
5117 /* RPS code assumes GPLL is used */
5118 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
5120 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? "yes" : "no");
5121 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
5123 dev_priv->rps.cur_freq = (val >> 8) & 0xff;
5124 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
5125 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
5126 dev_priv->rps.cur_freq);
5128 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
5129 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
5130 dev_priv->rps.efficient_freq);
5132 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
5134 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5137 static void valleyview_enable_rps(struct drm_device *dev)
5139 struct drm_i915_private *dev_priv = dev->dev_private;
5140 struct intel_engine_cs *ring;
5141 u32 gtfifodbg, val, rc6_mode = 0;
5142 int i;
5144 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
5146 valleyview_check_pctx(dev_priv);
5148 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
5149 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
5150 gtfifodbg);
5151 I915_WRITE(GTFIFODBG, gtfifodbg);
5154 /* If VLV, Forcewake all wells, else re-direct to regular path */
5155 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5157 /* Disable RC states. */
5158 I915_WRITE(GEN6_RC_CONTROL, 0);
5160 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
5161 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
5162 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
5163 I915_WRITE(GEN6_RP_UP_EI, 66000);
5164 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
5166 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
5168 I915_WRITE(GEN6_RP_CONTROL,
5169 GEN6_RP_MEDIA_TURBO |
5170 GEN6_RP_MEDIA_HW_NORMAL_MODE |
5171 GEN6_RP_MEDIA_IS_GFX |
5172 GEN6_RP_ENABLE |
5173 GEN6_RP_UP_BUSY_AVG |
5174 GEN6_RP_DOWN_IDLE_CONT);
5176 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
5177 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
5178 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
5180 for_each_ring(ring, dev_priv, i)
5181 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
5183 I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
5185 /* allows RC6 residency counter to work */
5186 I915_WRITE(VLV_COUNTER_CONTROL,
5187 _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
5188 VLV_RENDER_RC0_COUNT_EN |
5189 VLV_MEDIA_RC6_COUNT_EN |
5190 VLV_RENDER_RC6_COUNT_EN));
5192 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
5193 rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
5195 intel_print_rc6_info(dev, rc6_mode);
5197 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
5199 /* Setting Fixed Bias */
5200 val = VLV_OVERRIDE_EN |
5201 VLV_SOC_TDP_EN |
5202 VLV_BIAS_CPU_125_SOC_875;
5203 vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
5205 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
5207 /* RPS code assumes GPLL is used */
5208 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
5210 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? "yes" : "no");
5211 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
5213 dev_priv->rps.cur_freq = (val >> 8) & 0xff;
5214 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
5215 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
5216 dev_priv->rps.cur_freq);
5218 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
5219 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
5220 dev_priv->rps.efficient_freq);
5222 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
5224 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5227 static unsigned long intel_pxfreq(u32 vidfreq)
5229 unsigned long freq;
5230 int div = (vidfreq & 0x3f0000) >> 16;
5231 int post = (vidfreq & 0x3000) >> 12;
5232 int pre = (vidfreq & 0x7);
5234 if (!pre)
5235 return 0;
5237 freq = ((div * 133333) / ((1<<post) * pre));
5239 return freq;
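/*
 * Worked example (illustrative): a vidfreq encoding div = 12, post = 1,
 * pre = 3 gives 12 * 133333 / ((1 << 1) * 3) = 266666, i.e. about 266 MHz
 * in the same kHz units as the 133333 constant.
 */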
5242 static const struct cparams {
5243 u16 i;
5244 u16 t;
5245 u16 m;
5246 u16 c;
5247 } cparams[] = {
5248 { 1, 1333, 301, 28664 },
5249 { 1, 1066, 294, 24460 },
5250 { 1, 800, 294, 25192 },
5251 { 0, 1333, 276, 27605 },
5252 { 0, 1066, 276, 27605 },
5253 { 0, 800, 231, 23784 },
5256 static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
5258 u64 total_count, diff, ret;
5259 u32 count1, count2, count3, m = 0, c = 0;
5260 unsigned long now = jiffies_to_msecs(jiffies), diff1;
5261 int i;
5263 assert_spin_locked(&mchdev_lock);
5265 diff1 = now - dev_priv->ips.last_time1;
5267 /* Prevent division-by-zero if we are asking too fast.
5268 * Also, we don't get interesting results if we are polling
5269 * faster than once in 10ms, so just return the saved value
5270 * in such cases.
5271 */
5272 if (diff1 <= 10)
5273 return dev_priv->ips.chipset_power;
5275 count1 = I915_READ(DMIEC);
5276 count2 = I915_READ(DDREC);
5277 count3 = I915_READ(CSIEC);
5279 total_count = count1 + count2 + count3;
5281 /* FIXME: handle per-counter overflow */
5282 if (total_count < dev_priv->ips.last_count1) {
5283 diff = ~0UL - dev_priv->ips.last_count1;
5284 diff += total_count;
5285 } else {
5286 diff = total_count - dev_priv->ips.last_count1;
5289 for (i = 0; i < ARRAY_SIZE(cparams); i++) {
5290 if (cparams[i].i == dev_priv->ips.c_m &&
5291 cparams[i].t == dev_priv->ips.r_t) {
5292 m = cparams[i].m;
5293 c = cparams[i].c;
5294 break;
5298 diff = div_u64(diff, diff1);
5299 ret = ((m * diff) + c);
5300 ret = div_u64(ret, 10);
5302 dev_priv->ips.last_count1 = total_count;
5303 dev_priv->ips.last_time1 = now;
5305 dev_priv->ips.chipset_power = ret;
5307 return ret;
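/*
 * Worked example (illustrative): with the { 1, 1333, 301, 28664 } row
 * selected and a counter delta of 100 per ms, the estimate is
 * (301 * 100 + 28664) / 10 = 5876 in chipset_power units.
 */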
5310 unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
5312 struct drm_device *dev = dev_priv->dev;
5313 unsigned long val;
5315 if (INTEL_INFO(dev)->gen != 5)
5316 return 0;
5318 spin_lock_irq(&mchdev_lock);
5320 val = __i915_chipset_val(dev_priv);
5322 spin_unlock_irq(&mchdev_lock);
5324 return val;
5327 unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
5329 unsigned long m, x, b;
5330 u32 tsfs;
5332 tsfs = I915_READ(TSFS);
5334 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
5335 x = I915_READ8(TR1);
5337 b = tsfs & TSFS_INTR_MASK;
5339 return ((m * x) / 127) - b;
5342 static int _pxvid_to_vd(u8 pxvid)
5344 if (pxvid == 0)
5345 return 0;
5347 if (pxvid >= 8 && pxvid < 31)
5348 pxvid = 31;
5350 return (pxvid + 2) * 125;
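/*
 * Worked example (illustrative): pxvid = 10 falls in the [8, 31) hole and
 * is clamped to 31, giving (31 + 2) * 125 = 4125; pxvid = 40 maps straight
 * to (40 + 2) * 125 = 5250.
 */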
5353 static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
5355 struct drm_device *dev = dev_priv->dev;
5356 const int vd = _pxvid_to_vd(pxvid);
5357 const int vm = vd - 1125;
5359 if (INTEL_INFO(dev)->is_mobile)
5360 return vm > 0 ? vm : 0;
5362 return vd;
5365 static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
5367 u64 now, diff, diffms;
5368 u32 count;
5370 assert_spin_locked(&mchdev_lock);
5372 now = ktime_get_raw_ns();
5373 diffms = now - dev_priv->ips.last_time2;
5374 do_div(diffms, NSEC_PER_MSEC);
5376 /* Don't divide by 0 */
5377 if (!diffms)
5378 return;
5380 count = I915_READ(GFXEC);
5382 if (count < dev_priv->ips.last_count2) {
5383 diff = ~0UL - dev_priv->ips.last_count2;
5384 diff += count;
5385 } else {
5386 diff = count - dev_priv->ips.last_count2;
5389 dev_priv->ips.last_count2 = count;
5390 dev_priv->ips.last_time2 = now;
5392 /* More magic constants... */
5393 diff = diff * 1181;
5394 diff = div_u64(diff, diffms * 10);
5395 dev_priv->ips.gfx_power = diff;
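/*
 * Worked example (illustrative): 1000 GFXEC ticks over 100 ms gives
 * 1000 * 1181 / (100 * 10) = 1181 in the internal gfx_power units.
 */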
5398 void i915_update_gfx_val(struct drm_i915_private *dev_priv)
5400 struct drm_device *dev = dev_priv->dev;
5402 if (INTEL_INFO(dev)->gen != 5)
5403 return;
5405 spin_lock_irq(&mchdev_lock);
5407 __i915_update_gfx_val(dev_priv);
5409 spin_unlock_irq(&mchdev_lock);
5412 static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
5414 unsigned long t, corr, state1, corr2, state2;
5415 u32 pxvid, ext_v;
5417 assert_spin_locked(&mchdev_lock);
5419 pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_freq * 4));
5420 pxvid = (pxvid >> 24) & 0x7f;
5421 ext_v = pvid_to_extvid(dev_priv, pxvid);
5423 state1 = ext_v;
5425 t = i915_mch_val(dev_priv);
5427 /* Revel in the empirically derived constants */
5429 /* Correction factor in 1/100000 units */
5430 if (t > 80)
5431 corr = ((t * 2349) + 135940);
5432 else if (t >= 50)
5433 corr = ((t * 964) + 29317);
5434 else /* < 50 */
5435 corr = ((t * 301) + 1004);
5437 corr = corr * ((150142 * state1) / 10000 - 78642);
5438 corr /= 100000;
5439 corr2 = (corr * dev_priv->ips.corr);
5441 state2 = (corr2 * state1) / 10000;
5442 state2 /= 100; /* convert to mW */
5444 __i915_update_gfx_val(dev_priv);
5446 return dev_priv->ips.gfx_power + state2;
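/*
 * Worked example (illustrative): at t = 60 the middle band above applies,
 * so corr = 60 * 964 + 29317 = 87157 in 1/100000 units before the state1
 * and ips.corr scaling.
 */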
5449 unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
5451 struct drm_device *dev = dev_priv->dev;
5452 unsigned long val;
5454 if (INTEL_INFO(dev)->gen != 5)
5455 return 0;
5457 spin_lock_irq(&mchdev_lock);
5459 val = __i915_gfx_val(dev_priv);
5461 spin_unlock_irq(&mchdev_lock);
5463 return val;
5464 }
5466 /**
5467 * i915_read_mch_val - return value for IPS use
5468 *
5469 * Calculate and return a value for the IPS driver to use when deciding whether
5470 * we have thermal and power headroom to increase CPU or GPU power budget.
5471 */
5472 unsigned long i915_read_mch_val(void)
5474 struct drm_i915_private *dev_priv;
5475 unsigned long chipset_val, graphics_val, ret = 0;
5477 spin_lock_irq(&mchdev_lock);
5478 if (!i915_mch_dev)
5479 goto out_unlock;
5480 dev_priv = i915_mch_dev;
5482 chipset_val = __i915_chipset_val(dev_priv);
5483 graphics_val = __i915_gfx_val(dev_priv);
5485 ret = chipset_val + graphics_val;
5487 out_unlock:
5488 spin_unlock_irq(&mchdev_lock);
5490 return ret;
5492 EXPORT_SYMBOL_GPL(i915_read_mch_val);
5494 /**
5495 * i915_gpu_raise - raise GPU frequency limit
5496 *
5497 * Raise the limit; IPS indicates we have thermal headroom.
5498 */
5499 bool i915_gpu_raise(void)
5501 struct drm_i915_private *dev_priv;
5502 bool ret = true;
5504 spin_lock_irq(&mchdev_lock);
5505 if (!i915_mch_dev) {
5506 ret = false;
5507 goto out_unlock;
5509 dev_priv = i915_mch_dev;
5511 if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
5512 dev_priv->ips.max_delay--;
5514 out_unlock:
5515 spin_unlock_irq(&mchdev_lock);
5517 return ret;
5519 EXPORT_SYMBOL_GPL(i915_gpu_raise);
5521 /**
5522 * i915_gpu_lower - lower GPU frequency limit
5523 *
5524 * IPS indicates we're close to a thermal limit, so throttle back the GPU
5525 * frequency maximum.
5526 */
5527 bool i915_gpu_lower(void)
5529 struct drm_i915_private *dev_priv;
5530 bool ret = true;
5532 spin_lock_irq(&mchdev_lock);
5533 if (!i915_mch_dev) {
5534 ret = false;
5535 goto out_unlock;
5537 dev_priv = i915_mch_dev;
5539 if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
5540 dev_priv->ips.max_delay++;
5542 out_unlock:
5543 spin_unlock_irq(&mchdev_lock);
5545 return ret;
5547 EXPORT_SYMBOL_GPL(i915_gpu_lower);
5549 /**
5550 * i915_gpu_busy - indicate GPU busyness to IPS
5551 *
5552 * Tell the IPS driver whether or not the GPU is busy.
5553 */
5554 bool i915_gpu_busy(void)
5556 struct drm_i915_private *dev_priv;
5557 struct intel_engine_cs *ring;
5558 bool ret = false;
5559 int i;
5561 spin_lock_irq(&mchdev_lock);
5562 if (!i915_mch_dev)
5563 goto out_unlock;
5564 dev_priv = i915_mch_dev;
5566 for_each_ring(ring, dev_priv, i)
5567 ret |= !list_empty(&ring->request_list);
5569 out_unlock:
5570 spin_unlock_irq(&mchdev_lock);
5572 return ret;
5574 EXPORT_SYMBOL_GPL(i915_gpu_busy);
5576 /**
5577 * i915_gpu_turbo_disable - disable graphics turbo
5578 *
5579 * Disable graphics turbo by resetting the max frequency and setting the
5580 * current frequency to the default.
5581 */
5582 bool i915_gpu_turbo_disable(void)
5584 struct drm_i915_private *dev_priv;
5585 bool ret = true;
5587 spin_lock_irq(&mchdev_lock);
5588 if (!i915_mch_dev) {
5589 ret = false;
5590 goto out_unlock;
5592 dev_priv = i915_mch_dev;
5594 dev_priv->ips.max_delay = dev_priv->ips.fstart;
5596 if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
5597 ret = false;
5599 out_unlock:
5600 spin_unlock_irq(&mchdev_lock);
5602 return ret;
5604 EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
5606 /*
5607 * Tells the intel_ips driver that the i915 driver is now loaded, if
5608 * IPS got loaded first.
5609 *
5610 * This awkward dance is so that neither module has to depend on the
5611 * other in order for IPS to do the appropriate communication of
5612 * GPU turbo limits to i915.
5613 */
5614 static void
5615 ips_ping_for_i915_load(void)
5617 void (*link)(void);
5619 link = symbol_get(ips_link_to_i915_driver);
5620 if (link) {
5621 link();
5622 symbol_put(ips_link_to_i915_driver);
5626 void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
5628 /* We only register the i915 ips part with intel-ips once everything is
5629 * set up, to avoid intel-ips sneaking in and reading bogus values. */
5630 spin_lock_irq(&mchdev_lock);
5631 i915_mch_dev = dev_priv;
5632 spin_unlock_irq(&mchdev_lock);
5634 ips_ping_for_i915_load();
5637 void intel_gpu_ips_teardown(void)
5639 spin_lock_irq(&mchdev_lock);
5640 i915_mch_dev = NULL;
5641 spin_unlock_irq(&mchdev_lock);
5644 static void intel_init_emon(struct drm_device *dev)
5646 struct drm_i915_private *dev_priv = dev->dev_private;
5647 u32 lcfuse;
5648 u8 pxw[16];
5649 int i;
5651 /* Disable PMON while we program the event weights */
5652 I915_WRITE(ECR, 0);
5653 POSTING_READ(ECR);
5655 /* Program energy weights for various events */
5656 I915_WRITE(SDEW, 0x15040d00);
5657 I915_WRITE(CSIEW0, 0x007f0000);
5658 I915_WRITE(CSIEW1, 0x1e220004);
5659 I915_WRITE(CSIEW2, 0x04000004);
5661 for (i = 0; i < 5; i++)
5662 I915_WRITE(PEW + (i * 4), 0);
5663 for (i = 0; i < 3; i++)
5664 I915_WRITE(DEW + (i * 4), 0);
5666 /* Program P-state weights to account for frequency power adjustment */
5667 for (i = 0; i < 16; i++) {
5668 u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
5669 unsigned long freq = intel_pxfreq(pxvidfreq);
5670 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
5671 PXVFREQ_PX_SHIFT;
5672 unsigned long val;
5674 val = vid * vid;
5675 val *= (freq / 1000);
5676 val *= 255;
5677 val /= (127*127*900);
5678 if (val > 0xff)
5679 DRM_ERROR("bad pxval: %ld\n", val);
5680 pxw[i] = val;
5682 /* Render standby states get 0 weight */
5683 pxw[14] = 0;
5684 pxw[15] = 0;
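/*
 * Worked example (illustrative): vid = 50 at freq = 200000 weighs in at
 * 50 * 50 * 200 * 255 / (127 * 127 * 900) = 127500000 / 14516100, which
 * truncates to a pxw entry of 8.
 */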
5686 for (i = 0; i < 4; i++) {
5687 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
5688 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
5689 I915_WRITE(PXW + (i * 4), val);
5692 /* Adjust magic regs to magic values (more experimental results) */
5693 I915_WRITE(OGW0, 0);
5694 I915_WRITE(OGW1, 0);
5695 I915_WRITE(EG0, 0x00007f00);
5696 I915_WRITE(EG1, 0x0000000e);
5697 I915_WRITE(EG2, 0x000e0000);
5698 I915_WRITE(EG3, 0x68000300);
5699 I915_WRITE(EG4, 0x42000000);
5700 I915_WRITE(EG5, 0x00140031);
5701 I915_WRITE(EG6, 0);
5702 I915_WRITE(EG7, 0);
5704 for (i = 0; i < 8; i++)
5705 I915_WRITE(PXWL + (i * 4), 0);
5707 /* Enable PMON + select events */
5708 I915_WRITE(ECR, 0x80000019);
5710 lcfuse = I915_READ(LCFUSE02);
5712 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);

void intel_init_gt_powersave(struct drm_device *dev)
{
	i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);

	if (IS_CHERRYVIEW(dev))
		cherryview_init_gt_powersave(dev);
	else if (IS_VALLEYVIEW(dev))
		valleyview_init_gt_powersave(dev);
}

void intel_cleanup_gt_powersave(struct drm_device *dev)
{
	if (IS_CHERRYVIEW(dev))
		return;
	else if (IS_VALLEYVIEW(dev))
		valleyview_cleanup_gt_powersave(dev);
}

static void gen6_suspend_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	gen6_disable_rps_interrupts(dev);
}

/**
 * intel_suspend_gt_powersave - suspend PM work and helper threads
 * @dev: drm device
 *
 * We don't want to disable RC6 or other features here, we just want
 * to make sure any work we've queued has finished and won't bother
 * us while we're suspended.
 */
void intel_suspend_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return;

	gen6_suspend_rps(dev);

	/* Force GPU to min freq during suspend */
	gen6_rps_idle(dev_priv);
}

void intel_disable_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_IRONLAKE_M(dev)) {
		ironlake_disable_drps(dev);
	} else if (INTEL_INFO(dev)->gen >= 6) {
		intel_suspend_gt_powersave(dev);

		mutex_lock(&dev_priv->rps.hw_lock);
		if (INTEL_INFO(dev)->gen >= 9)
			gen9_disable_rps(dev);
		else if (IS_CHERRYVIEW(dev))
			cherryview_disable_rps(dev);
		else if (IS_VALLEYVIEW(dev))
			valleyview_disable_rps(dev);
		else
			gen6_disable_rps(dev);

		dev_priv->rps.enabled = false;
		mutex_unlock(&dev_priv->rps.hw_lock);
	}
}

static void intel_gen6_powersave_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     rps.delayed_resume_work.work);
	struct drm_device *dev = dev_priv->dev;

	mutex_lock(&dev_priv->rps.hw_lock);

	gen6_reset_rps_interrupts(dev);

	if (IS_CHERRYVIEW(dev)) {
		cherryview_enable_rps(dev);
	} else if (IS_VALLEYVIEW(dev)) {
		valleyview_enable_rps(dev);
	} else if (INTEL_INFO(dev)->gen >= 9) {
		gen9_enable_rc6(dev);
		gen9_enable_rps(dev);
		__gen6_update_ring_freq(dev);
	} else if (IS_BROADWELL(dev)) {
		gen8_enable_rps(dev);
		__gen6_update_ring_freq(dev);
	} else {
		gen6_enable_rps(dev);
		__gen6_update_ring_freq(dev);
	}

	WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
	WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq);

	WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq);
	WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq);

	dev_priv->rps.enabled = true;

	gen6_enable_rps_interrupts(dev);

	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);
}

void intel_enable_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Powersaving is controlled by the host when inside a VM */
	if (intel_vgpu_active(dev))
		return;

	if (IS_IRONLAKE_M(dev)) {
		mutex_lock(&dev->struct_mutex);
		ironlake_enable_drps(dev);
		intel_init_emon(dev);
		mutex_unlock(&dev->struct_mutex);
	} else if (INTEL_INFO(dev)->gen >= 6) {
		/*
		 * PCU communication is slow and this doesn't need to be
		 * done at any specific time, so do this out of our fast path
		 * to make resume and init faster.
		 *
		 * We depend on the HW RC6 power context save/restore
		 * mechanism when entering D3 through runtime PM suspend. So
		 * disable RPM until RPS/RC6 is properly setup. We can only
		 * get here via the driver load/system resume/runtime resume
		 * paths, so the _noresume version is enough (and in case of
		 * runtime resume it's necessary).
		 */
		if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
					  round_jiffies_up_relative(HZ)))
			intel_runtime_pm_get_noresume(dev_priv);
	}
}
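
/*
 * Editor's note: the intel_runtime_pm_get_noresume() reference taken
 * here is dropped by the intel_runtime_pm_put() at the end of
 * intel_gen6_powersave_work() above. Taking it only when
 * schedule_delayed_work() returns true keeps the reference count
 * balanced at exactly one per queued instance of the work item.
 */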

void intel_reset_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return;

	gen6_suspend_rps(dev);
	dev_priv->rps.enabled = false;
}

static void ibx_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}

static void g4x_disable_trickle_feed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);

		I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe)));
		POSTING_READ(DSPSURF(pipe));
	}
}
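
/*
 * Editor's note: rewriting DSPSURF with its current value is not a
 * no-op; the surface address write is what arms the pipe's
 * double-buffered plane registers, so it makes the DSPCNTR change
 * above take effect, and the POSTING_READ flushes the write before
 * the next pipe is touched.
 */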

static void ilk_init_lp_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
	I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
	I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);

	/*
	 * Don't touch WM1S_LP_EN here.
	 * Doing so could cause underruns.
	 */
}

static void ironlake_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	/*
	 * Required for FBC
	 * WaFbcDisableDpfcClockGating:ilk
	 */
	dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE;

	I915_WRITE(PCH_3DCGDIS0,
		   MARIUNIT_CLOCK_GATE_DISABLE |
		   SVSMUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(PCH_3DCGDIS1,
		   VFMUNIT_CLOCK_GATE_DISABLE);

	/*
	 * According to the spec the following bits should be set in
	 * order to enable memory self-refresh
	 * The bit 22/21 of 0x42004
	 * The bit 5 of 0x42020
	 * The bit 15 of 0x45000
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
	dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
	I915_WRITE(DISP_ARB_CTL,
		   (I915_READ(DISP_ARB_CTL) |
		    DISP_FBC_WM_DIS));

	ilk_init_lp_watermarks(dev);

	/*
	 * Based on the document from hardware guys the following bits
	 * should be set unconditionally in order to enable FBC.
	 * The bit 22 of 0x42000
	 * The bit 22 of 0x42004
	 * The bit 7,8,9 of 0x42020.
	 */
	if (IS_IRONLAKE_M(dev)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ilk */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
		I915_WRITE(ILK_DISPLAY_CHICKEN2,
			   I915_READ(ILK_DISPLAY_CHICKEN2) |
			   ILK_DPARB_GATE);
	}

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);
	I915_WRITE(_3D_CHICKEN2,
		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
		   _3D_CHICKEN2_WM_READ_PIPELINED);

	/* WaDisableRenderCachePipelinedFlush:ilk */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:ilk */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev);

	ibx_init_clock_gating(dev);
}
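
/*
 * Editor's note on the _MASKED_BIT_*() helpers used throughout the
 * clock gating routines: registers such as CACHE_MODE_0 are "masked"
 * registers, where the upper 16 bits select which of the lower 16
 * bits a write actually touches. _MASKED_BIT_ENABLE(x) expands to
 * (x | (x << 16)) and _MASKED_BIT_DISABLE(x) to (x << 16), so single
 * bits can be flipped without a read-modify-write cycle.
 */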

static void cpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t val;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
		   PCH_DPLUNIT_CLOCK_GATE_DISABLE |
		   PCH_CPUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
		   DPLS_EDP_PPS_FIX_DIS);
	/* The below fixes the weird display corruption, a few pixels shifted
	 * downward, on (only) LVDS of some HP laptops with IVY.
	 */
	for_each_pipe(dev_priv, pipe) {
		val = I915_READ(TRANS_CHICKEN2(pipe));
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		if (dev_priv->vbt.fdi_rx_polarity_inverted)
			val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
		I915_WRITE(TRANS_CHICKEN2(pipe), val);
	}
	/* WADP0ClockGatingDisable */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(TRANS_CHICKEN1(pipe),
			   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
	}
}

static void gen6_check_mch_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	tmp = I915_READ(MCH_SSKPD);
	if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
		DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x, this can cause underruns.\n",
			      tmp);
}

static void gen6_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
	I915_WRITE(_3D_CHICKEN,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));

	/* WaDisable_RenderCache_OperationalFlush:snb */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN6_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	ilk_init_lp_watermarks(dev);

	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));

	I915_WRITE(GEN6_UCGCTL1,
		   I915_READ(GEN6_UCGCTL1) |
		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
	 * gating disable must be set. Failure to set it results in
	 * flickering pixels due to Z write ordering failures after
	 * some amount of runtime in the Mesa "fire" demo, and Unigine
	 * Sanctuary and Tropics, and apparently anything else with
	 * alpha test or pixel discard.
	 *
	 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we didn't debug actual testcases to find it out.
	 *
	 * WaDisableRCCUnitClockGating:snb
	 * WaDisableRCPBUnitClockGating:snb
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/* WaStripsFansDisableFastClipPerformanceFix:snb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));

	/*
	 * Bspec says:
	 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
	 * 3DSTATE_SF number of SF output attributes is more than 16."
	 */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));

	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * The bit21 and bit22 of 0x42000
	 * The bit21 and bit22 of 0x42004
	 * The bit5 and bit7 of 0x42020
	 * The bit14 of 0x70180
	 * The bit14 of 0x71180
	 *
	 * WaFbcAsynchFlipDisableFbcQueue:snb
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE_D,
		   I915_READ(ILK_DSPCLK_GATE_D) |
		   ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);

	g4x_disable_trickle_feed(dev);

	cpt_init_clock_gating(dev);

	gen6_check_mch_setup(dev);
}

static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
{
	uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);

	/*
	 * WaVSThreadDispatchOverride:ivb,vlv
	 *
	 * This actually overrides the dispatch
	 * mode for all thread types.
	 */
	reg &= ~GEN7_FF_SCHED_MASK;
	reg |= GEN7_FF_TS_SCHED_HW;
	reg |= GEN7_FF_VS_SCHED_HW;
	reg |= GEN7_FF_DS_SCHED_HW;

	I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}

static void lpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * TODO: this bit should only be enabled when really needed, then
	 * disabled when not needed anymore in order to save power.
	 */
	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
		I915_WRITE(SOUTH_DSPCLK_GATE_D,
			   I915_READ(SOUTH_DSPCLK_GATE_D) |
			   PCH_LP_PARTITION_LEVEL_DISABLE);

	/* WADPOClockGatingDisable:hsw */
	I915_WRITE(_TRANSA_CHICKEN1,
		   I915_READ(_TRANSA_CHICKEN1) |
		   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}

static void lpt_suspend_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
		uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);

		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}
}

static void broadwell_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	uint32_t misccpctl;

	ilk_init_lp_watermarks(dev);

	/* WaSwitchSolVfFArbitrationPriority:bdw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaPsrDPAMaskVBlankInSRD:bdw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);

	/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(CHICKEN_PIPESL_1(pipe),
			   I915_READ(CHICKEN_PIPESL_1(pipe)) |
			   BDW_DPRS_MASK_VBLANK_SRD);
	}

	/* WaVSRefCountFullforceMissDisable:bdw */
	/* WaDSRefCountFullforceMissDisable:bdw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableSDEUnitClockGating:bdw */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * WaProgramL3SqcReg1Default:bdw
	 * WaTempDisableDOPClkGating:bdw
	 */
	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	I915_WRITE(GEN8_L3SQCREG1, BDW_WA_L3SQCREG1_DEFAULT);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

	/*
	 * WaGttCachingOffByDefault:bdw
	 * GTT cache may not work with big pages, so if those
	 * are ever enabled GTT cache may need to be disabled.
	 */
	I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);

	lpt_init_clock_gating(dev);
}

static void haswell_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	ilk_init_lp_watermarks(dev);

	/* L3 caching of data atomics doesn't work -- disable it. */
	I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
	I915_WRITE(HSW_ROW_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:hsw */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	/* WaVSRefCountFullforceMissDisable:hsw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);

	/* WaDisable_RenderCache_OperationalFlush:hsw */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* enable HiZ Raw Stall Optimization */
	I915_WRITE(CACHE_MODE_0_GEN7,
		   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));

	/* WaDisable4x2SubspanOptimization:hsw */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	/* WaSampleCChickenBitEnable:hsw */
	I915_WRITE(HALF_SLICE_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE));

	/* WaSwitchSolVfFArbitrationPriority:hsw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaRsPkgCStateDisplayPMReq:hsw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);

	lpt_init_clock_gating(dev);
}

static void ivybridge_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t snpcr;

	ilk_init_lp_watermarks(dev);

	I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableEarlyCull:ivb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:ivb */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaDisablePSDDualDispatchEnable:ivb */
	if (IS_IVB_GT1(dev))
		I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
			   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:ivb */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode:ivb */
	I915_WRITE(GEN7_L3CNTLREG1,
		   GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
		   GEN7_WA_L3_CHICKEN_MODE);
	if (IS_IVB_GT1(dev)) {
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	} else {
		/* must write both registers */
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
		I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	}

	/* WaForceL3Serialization:ivb */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* This is required by WaCatErrorRejectionIssue:ivb */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	g4x_disable_trickle_feed(dev);

	gen7_setup_fixed_func_scheduler(dev_priv);

	if (0) { /* causes HiZ corruption on ivb:gt1 */
		/* enable HiZ Raw Stall Optimization */
		I915_WRITE(CACHE_MODE_0_GEN7,
			   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
	}

	/* WaDisable4x2SubspanOptimization:ivb */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= GEN6_MBC_SNPCR_MED;
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	if (!HAS_PCH_NOP(dev))
		cpt_init_clock_gating(dev);

	gen6_check_mch_setup(dev);
}

static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	I915_WRITE(CBR1_VLV, 0);
}

static void valleyview_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	vlv_init_display_clock_gating(dev_priv);

	/* WaDisableEarlyCull:vlv */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:vlv */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaPsdDispatchEnable:vlv */
	/* WaDisablePSDDualDispatchEnable:vlv */
	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
				      GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:vlv */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* WaForceL3Serialization:vlv */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/* WaDisableDopClockGating:vlv */
	I915_WRITE(GEN7_ROW_CHICKEN2,
		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:vlv */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	gen7_setup_fixed_func_scheduler(dev_priv);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableL3Bank2xClockGate:vlv
	 * Disabling L3 clock gating- MMIO 940c[25] = 1
	 * Set bit 25, to disable L3_BANK_2x_CLK_GATING */
	I915_WRITE(GEN7_UCGCTL4,
		   I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);

	/*
	 * BSpec says this must be set, even though
	 * WaDisable4x2SubspanOptimization isn't listed for VLV.
	 */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	/*
	 * WaIncreaseL3CreditsForVLVB0:vlv
	 * This is the hardware default actually.
	 */
	I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);

	/*
	 * WaDisableVLVClockGating_VBIIssue:vlv
	 * Disable clock gating on the GCFG unit to prevent a delay
	 * in the reporting of vblank events.
	 */
	I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}

static void cherryview_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	vlv_init_display_clock_gating(dev_priv);

	/* WaVSRefCountFullforceMissDisable:chv */
	/* WaDSRefCountFullforceMissDisable:chv */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	/* WaDisableSemaphoreAndSyncFlipWait:chv */
	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableCSUnitClockGating:chv */
	I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableSDEUnitClockGating:chv */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * GTT cache may not work with big pages, so if those
	 * are ever enabled GTT cache may need to be disabled.
	 */
	I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
}

static void g4x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate;

	I915_WRITE(RENCLK_GATE_D1, 0);
	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
		   GS_UNIT_CLOCK_GATE_DISABLE |
		   CL_UNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(RAMCLK_GATE_D, 0);
	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
		OVRUNIT_CLOCK_GATE_DISABLE |
		OVCUNIT_CLOCK_GATE_DISABLE;
	if (IS_GM45(dev))
		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);

	/* WaDisableRenderCachePipelinedFlush */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:g4x */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev);
}

static void crestline_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	I915_WRITE16(DEUC, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}

static void broadwater_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}

static void gen3_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dstate = I915_READ(D_STATE);

	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
		DSTATE_DOT_CLOCK_GATING;
	I915_WRITE(D_STATE, dstate);

	if (IS_PINEVIEW(dev))
		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));

	/* IIR "flip pending" means done if this bit is set */
	I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));

	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}

static void i85x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
		   _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));

	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
}

static void i830_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
}

void intel_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->display.init_clock_gating)
		dev_priv->display.init_clock_gating(dev);
}
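
/*
 * Editor's note: this is the generic entry point called during driver
 * load and resume; the per-platform init_clock_gating vfunc that it
 * dispatches to is selected in intel_init_pm() below.
 */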

void intel_suspend_hw(struct drm_device *dev)
{
	if (HAS_PCH_LPT(dev))
		lpt_suspend_hw(dev);
}

/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_fbc_init(dev_priv);

	/* For cxsr */
	if (IS_PINEVIEW(dev))
		i915_pineview_get_mem_freq(dev);
	else if (IS_GEN5(dev))
		i915_ironlake_get_mem_freq(dev);

	/* For FIFO watermark updates */
	if (INTEL_INFO(dev)->gen >= 9) {
		skl_setup_wm_latency(dev);

		if (IS_BROXTON(dev))
			dev_priv->display.init_clock_gating =
				bxt_init_clock_gating;
		else if (IS_SKYLAKE(dev))
			dev_priv->display.init_clock_gating =
				skl_init_clock_gating;
		dev_priv->display.update_wm = skl_update_wm;
		dev_priv->display.update_sprite_wm = skl_update_sprite_wm;
	} else if (HAS_PCH_SPLIT(dev)) {
		ilk_setup_wm_latency(dev);

		if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
		     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
		    (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
		     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
			dev_priv->display.update_wm = ilk_update_wm;
			dev_priv->display.update_sprite_wm = ilk_update_sprite_wm;
		} else {
			DRM_DEBUG_KMS("Failed to read display plane latency. "
				      "Disabling CxSR\n");
		}

		if (IS_GEN5(dev))
			dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
		else if (IS_GEN6(dev))
			dev_priv->display.init_clock_gating = gen6_init_clock_gating;
		else if (IS_IVYBRIDGE(dev))
			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
		else if (IS_HASWELL(dev))
			dev_priv->display.init_clock_gating = haswell_init_clock_gating;
		else if (INTEL_INFO(dev)->gen == 8)
			dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
	} else if (IS_CHERRYVIEW(dev)) {
		dev_priv->display.update_wm = valleyview_update_wm;
		dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
		dev_priv->display.init_clock_gating =
			cherryview_init_clock_gating;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->display.update_wm = valleyview_update_wm;
		dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
		dev_priv->display.init_clock_gating =
			valleyview_init_clock_gating;
	} else if (IS_PINEVIEW(dev)) {
		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
					    dev_priv->is_ddr3,
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			DRM_INFO("failed to find known CxSR latency "
				 "(found ddr%s fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
			/* Disable CxSR and never update its watermark again */
			intel_set_memory_cxsr(dev_priv, false);
			dev_priv->display.update_wm = NULL;
		} else {
			dev_priv->display.update_wm = pineview_update_wm;
		}
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_G4X(dev)) {
		dev_priv->display.update_wm = g4x_update_wm;
		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
	} else if (IS_GEN4(dev)) {
		dev_priv->display.update_wm = i965_update_wm;
		if (IS_CRESTLINE(dev))
			dev_priv->display.init_clock_gating = crestline_init_clock_gating;
		else if (IS_BROADWATER(dev))
			dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
	} else if (IS_GEN3(dev)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_GEN2(dev)) {
		if (INTEL_INFO(dev)->num_pipes == 1) {
			dev_priv->display.update_wm = i845_update_wm;
			dev_priv->display.get_fifo_size = i845_get_fifo_size;
		} else {
			dev_priv->display.update_wm = i9xx_update_wm;
			dev_priv->display.get_fifo_size = i830_get_fifo_size;
		}

		if (IS_I85X(dev) || IS_I865G(dev))
			dev_priv->display.init_clock_gating = i85x_init_clock_gating;
		else
			dev_priv->display.init_clock_gating = i830_init_clock_gating;
	} else {
		DRM_ERROR("unexpected fall-through in intel_init_pm\n");
	}
}

int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE(GEN6_PCODE_DATA, *val);
	I915_WRITE(GEN6_PCODE_DATA1, 0);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500)) {
		DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	*val = I915_READ(GEN6_PCODE_DATA);
	I915_WRITE(GEN6_PCODE_DATA, 0);

	return 0;
}

int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE(GEN6_PCODE_DATA, val);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500)) {
		DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	I915_WRITE(GEN6_PCODE_DATA, 0);

	return 0;
}
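
/*
 * Editor's note: an illustrative use of the pcode helpers above (the
 * mailbox constant is just an example); rps.hw_lock must be held, as
 * the WARN_ONs enforce:
 *
 *	u32 val = 0;
 *
 *	mutex_lock(&dev_priv->rps.hw_lock);
 *	if (sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &val))
 *		DRM_DEBUG_DRIVER("pcode read failed\n");
 *	mutex_unlock(&dev_priv->rps.hw_lock);
 *
 * Note that the read variant also writes *val into GEN6_PCODE_DATA
 * before triggering the mailbox: some commands take an argument and
 * return their result through the same data register.
 */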

static int vlv_gpu_freq_div(unsigned int czclk_freq)
{
	switch (czclk_freq) {
	case 200:
		return 10;
	case 267:
		return 12;
	case 320:
	case 333:
		return 16;
	case 400:
		return 20;
	default:
		return -1;
	}
}

static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->mem_freq, 4);

	div = vlv_gpu_freq_div(czclk_freq);
	if (div < 0)
		return div;

	return DIV_ROUND_CLOSEST(czclk_freq * (val + 6 - 0xbd), div);
}

static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->mem_freq, 4);

	mul = vlv_gpu_freq_div(czclk_freq);
	if (mul < 0)
		return mul;

	return DIV_ROUND_CLOSEST(mul * val, czclk_freq) + 0xbd - 6;
}

static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	int div, czclk_freq = dev_priv->rps.cz_freq;

	/*
	 * Check for an unknown czclk_freq before halving: in C, -1 / 2
	 * truncates to 0, which would slip past the error check and
	 * divide by zero below.
	 */
	div = vlv_gpu_freq_div(czclk_freq);
	if (div < 0)
		return div;
	div /= 2;

	return DIV_ROUND_CLOSEST(czclk_freq * val, 2 * div) / 2;
}

static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	int mul, czclk_freq = dev_priv->rps.cz_freq;

	/* Same ordering as in chv_gpu_freq(): validate before halving. */
	mul = vlv_gpu_freq_div(czclk_freq);
	if (mul < 0)
		return mul;
	mul /= 2;

	/* CHV needs even values */
	return DIV_ROUND_CLOSEST(val * 2 * mul, czclk_freq) * 2;
}
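
/*
 * Editor's note: a worked example of the Valleyview conversion, with
 * hypothetical numbers. With mem_freq = 1333, czclk_freq is
 * DIV_ROUND_CLOSEST(1333, 4) = 333 and the divider is 16, so a
 * hardware opcode of 0xd0 (208) maps to
 *
 *	DIV_ROUND_CLOSEST(333 * (208 + 6 - 0xbd), 16) = 520 MHz
 *
 * and byt_freq_opcode(dev_priv, 520) inverts this back to 0xd0.
 */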

int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	if (IS_GEN9(dev_priv->dev))
		return (val * GT_FREQUENCY_MULTIPLIER) / GEN9_FREQ_SCALER;
	else if (IS_CHERRYVIEW(dev_priv->dev))
		return chv_gpu_freq(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv->dev))
		return byt_gpu_freq(dev_priv, val);
	else
		return val * GT_FREQUENCY_MULTIPLIER;
}

int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	if (IS_GEN9(dev_priv->dev))
		return (val * GEN9_FREQ_SCALER) / GT_FREQUENCY_MULTIPLIER;
	else if (IS_CHERRYVIEW(dev_priv->dev))
		return chv_freq_opcode(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv->dev))
		return byt_freq_opcode(dev_priv, val);
	else
		return val / GT_FREQUENCY_MULTIPLIER;
}
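
/*
 * Editor's note: intel_gpu_freq() and intel_freq_opcode() are meant to
 * be inverses, converting between hardware ratio opcodes and MHz. On
 * the plain gen6+ path, for example, an opcode of 18 maps to
 * 18 * GT_FREQUENCY_MULTIPLIER = 900 MHz and back again; because of
 * integer rounding the round trip is only exact for multiples of the
 * step size.
 */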

struct request_boost {
	struct work_struct work;
	struct drm_i915_gem_request *req;
};

static void __intel_rps_boost_work(struct work_struct *work)
{
	struct request_boost *boost = container_of(work, struct request_boost, work);
	struct drm_i915_gem_request *req = boost->req;

	if (!i915_gem_request_completed(req, true))
		gen6_rps_boost(to_i915(req->ring->dev), NULL,
			       req->emitted_jiffies);

	i915_gem_request_unreference__unlocked(req);
	kfree(boost);
}

void intel_queue_rps_boost_for_request(struct drm_device *dev,
				       struct drm_i915_gem_request *req)
{
	struct request_boost *boost;

	if (req == NULL || INTEL_INFO(dev)->gen < 6)
		return;

	if (i915_gem_request_completed(req, true))
		return;

	boost = kmalloc(sizeof(*boost), GFP_ATOMIC);
	if (boost == NULL)
		return;

	i915_gem_request_reference(req);
	boost->req = req;

	INIT_WORK(&boost->work, __intel_rps_boost_work);
	queue_work(to_i915(dev)->wq, &boost->work);
}
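
/*
 * Editor's note: the boost is allocated with GFP_ATOMIC and finished
 * from a workqueue, presumably because this can be called from
 * contexts that must not sleep; if the allocation fails the boost is
 * silently dropped, which is harmless since it is only a performance
 * hint.
 */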

void intel_pm_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_init(&dev_priv->rps.hw_lock);
	spin_lock_init(&dev_priv->rps.client_lock);

	INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
			  intel_gen6_powersave_work);
	INIT_LIST_HEAD(&dev_priv->rps.clients);
	INIT_LIST_HEAD(&dev_priv->rps.semaphores.link);
	INIT_LIST_HEAD(&dev_priv->rps.mmioflips.link);

	dev_priv->pm.suspended = false;
}
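
/*
 * Editor's note: the delayed_resume_work initialized here is the work
 * item that intel_enable_gt_powersave() schedules above (and whose
 * handler drops the runtime PM reference). intel_pm_setup() runs early
 * in driver load, before any of the enable/disable entry points can
 * be reached.
 */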