/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */
#include <linux/cpufreq.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
/* FBC, or Frame Buffer Compression, is a technique employed to compress the
 * framebuffer contents in-memory, aiming at reducing the required bandwidth
 * during in-memory transfers and, therefore, reducing power consumption.
 *
 * The benefits of FBC are mostly visible with solid backgrounds and
 * variation-less patterns.
 *
 * FBC-related functionality can be enabled by means of the
 * i915.i915_enable_fbc parameter.
 */
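/* Illustrative usage (not part of the original file): i915_enable_fbc is the
 * standard i915 module option; -1 selects the per-chip default handled in
 * intel_update_fbc(), 0 forces FBC off and 1 forces it on, e.g.:
 *
 *	modprobe i915 i915_enable_fbc=1
 *
 * Depending on the module_param() permissions, it may also be adjustable at
 * runtime via /sys/module/i915/parameters/i915_enable_fbc.
 */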
static void i8xx_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 fbc_ctl;

	/* Disable compression */
	fbc_ctl = I915_READ(FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
		DRM_DEBUG_KMS("FBC idle timed out\n");
		return;
	}

	DRM_DEBUG_KMS("disabled FBC\n");
}
static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int cfb_pitch;
	int plane, i;
	u32 fbc_ctl, fbc_ctl2;

	cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
	if (fb->pitches[0] < cfb_pitch)
		cfb_pitch = fb->pitches[0];

	/* FBC_CTL wants 64B units */
	cfb_pitch = (cfb_pitch / 64) - 1;
	plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG + (i * 4), 0);

	/* Set it up... */
	fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
	fbc_ctl2 |= plane;
	I915_WRITE(FBC_CONTROL2, fbc_ctl2);
	I915_WRITE(FBC_FENCE_OFF, crtc->y);

	/* enable it... */
	fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= obj->fence_reg;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
		      cfb_pitch, crtc->y, intel_crtc->plane);
}
static bool i8xx_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}
static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
	unsigned long stall_watermark = 200;
	u32 dpfc_ctl;

	dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
	I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);

	I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);

	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
static void g4x_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}
static bool g4x_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}
static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 blt_ecoskpd;

	/* Make sure blitter notifies FBC of writes */
	gen6_gt_force_wake_get(dev_priv);
	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
		GEN6_BLITTER_LOCK_SHIFT;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
			 GEN6_BLITTER_LOCK_SHIFT);
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	POSTING_READ(GEN6_BLITTER_ECOSKPD);
	gen6_gt_force_wake_put(dev_priv);
}
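/* Illustrative note (not in the original): GEN6_BLITTER_ECOSKPD behaves as a
 * masked register -- bits written via GEN6_BLITTER_LOCK_SHIFT select which
 * low bits a subsequent write may modify. Hence the sequence above: expose
 * the FBC_NOTIFY bit in the mask half, set it, then clear the mask again.
 */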
static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
	unsigned long stall_watermark = 200;
	u32 dpfc_ctl;

	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	dpfc_ctl &= DPFC_RESERVED;
	dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
	/* Set persistent mode for front-buffer rendering, ala X. */
	dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
	dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
	I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);

	I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
	I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_GEN6(dev)) {
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
		sandybridge_blit_fbc_update(dev);
	}

	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
static void ironlake_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}
static bool ironlake_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}
bool intel_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.fbc_enabled)
		return false;

	return dev_priv->display.fbc_enabled(dev);
}
static void intel_fbc_work_fn(struct work_struct *__work)
{
	struct intel_fbc_work *work =
		container_of(to_delayed_work(__work),
			     struct intel_fbc_work, work);
	struct drm_device *dev = work->crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	if (work == dev_priv->fbc_work) {
		/* Double check that we haven't switched fb without cancelling
		 * the prior work.
		 */
		if (work->crtc->fb == work->fb) {
			dev_priv->display.enable_fbc(work->crtc,
						     work->interval);

			dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
			dev_priv->cfb_fb = work->crtc->fb->base.id;
			dev_priv->cfb_y = work->crtc->y;
		}

		dev_priv->fbc_work = NULL;
	}
	mutex_unlock(&dev->struct_mutex);

	kfree(work);
}
static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
	if (dev_priv->fbc_work == NULL)
		return;

	DRM_DEBUG_KMS("cancelling pending FBC enable\n");

	/* Synchronisation is provided by struct_mutex and checking of
	 * dev_priv->fbc_work, so we can perform the cancellation
	 * entirely asynchronously.
	 */
	if (cancel_delayed_work(&dev_priv->fbc_work->work))
		/* tasklet was killed before being run, clean up */
		kfree(dev_priv->fbc_work);

	/* Mark the work as no longer wanted so that if it does
	 * wake-up (because the work was already running and waiting
	 * for our mutex), it will discover that it is no longer
	 * necessary to run.
	 */
	dev_priv->fbc_work = NULL;
}
void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct intel_fbc_work *work;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.enable_fbc)
		return;

	intel_cancel_fbc_work(dev_priv);

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (work == NULL) {
		dev_priv->display.enable_fbc(crtc, interval);
		return;
	}

	work->crtc = crtc;
	work->fb = crtc->fb;
	work->interval = interval;
	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);

	dev_priv->fbc_work = work;

	DRM_DEBUG_KMS("scheduling delayed FBC enable\n");

	/* Delay the actual enabling to let pageflipping cease and the
	 * display to settle before starting the compression. Note that
	 * this delay also serves a second purpose: it allows for a
	 * vblank to pass after disabling the FBC before we attempt
	 * to modify the control registers.
	 *
	 * A more complicated solution would involve tracking vblanks
	 * following the termination of the page-flipping sequence
	 * and indeed performing the enable as a co-routine and not
	 * waiting synchronously upon the vblank.
	 */
	schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}
void intel_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_cancel_fbc_work(dev_priv);

	if (!dev_priv->display.disable_fbc)
		return;

	dev_priv->display.disable_fbc(dev);
	dev_priv->cfb_plane = -1;
}
/**
 * intel_update_fbc - enable/disable FBC as needed
 * @dev: the drm_device
 *
 * Set up the framebuffer compression hardware at mode set time. We
 * enable it if possible:
 *   - plane A only (on pre-965)
 *   - no pixel multiply/line duplication
 *   - no alpha buffer discard
 *   - no dual wide
 *   - framebuffer <= 2048 in width, 1536 in height
 *
 * We can't assume that any compression will take place (worst case),
 * so the compressed buffer has to be the same size as the uncompressed
 * one. It also must reside (along with the line length buffer) in
 * stolen memory.
 *
 * We need to enable/disable FBC on a global basis.
 */
void intel_update_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = NULL, *tmp_crtc;
	struct intel_crtc *intel_crtc;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int enable_fbc;

	if (!i915_powersave)
		return;

	if (!I915_HAS_FBC(dev))
		return;

	/*
	 * If FBC is already on, we just have to verify that we can
	 * keep it that way...
	 * Need to disable if:
	 *   - more than one pipe is active
	 *   - changing FBC params (stride, fence, mode)
	 *   - new fb is too large to fit in compressed buffer
	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
	 */
	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
		if (tmp_crtc->enabled &&
		    !to_intel_crtc(tmp_crtc)->primary_disabled &&
		    tmp_crtc->fb) {
			if (crtc) {
				DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
				dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
				goto out_disable;
			}
			crtc = tmp_crtc;
		}
	}

	if (!crtc || crtc->fb == NULL) {
		DRM_DEBUG_KMS("no output, disabling\n");
		dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
		goto out_disable;
	}

	intel_crtc = to_intel_crtc(crtc);
	fb = crtc->fb;
	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	enable_fbc = i915_enable_fbc;
	if (enable_fbc < 0) {
		DRM_DEBUG_KMS("fbc set to per-chip default\n");
		enable_fbc = 1;
		if (INTEL_INFO(dev)->gen <= 6)
			enable_fbc = 0;
	}
	if (!enable_fbc) {
		DRM_DEBUG_KMS("fbc disabled per module param\n");
		dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
		goto out_disable;
	}
	if (intel_fb->obj->base.size > dev_priv->cfb_size) {
		DRM_DEBUG_KMS("framebuffer too large, disabling "
			      "compression\n");
		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
		goto out_disable;
	}
	if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
	    (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
		DRM_DEBUG_KMS("mode incompatible with compression, "
			      "disabling\n");
		dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
		goto out_disable;
	}
	if ((crtc->mode.hdisplay > 2048) ||
	    (crtc->mode.vdisplay > 1536)) {
		DRM_DEBUG_KMS("mode too large for compression, disabling\n");
		dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
		goto out_disable;
	}
	if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
		DRM_DEBUG_KMS("plane not 0, disabling compression\n");
		dev_priv->no_fbc_reason = FBC_BAD_PLANE;
		goto out_disable;
	}

	/* The use of a CPU fence is mandatory in order to detect writes
	 * by the CPU to the scanout and trigger updates to the FBC.
	 */
	if (obj->tiling_mode != I915_TILING_X ||
	    obj->fence_reg == I915_FENCE_REG_NONE) {
		DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
		dev_priv->no_fbc_reason = FBC_NOT_TILED;
		goto out_disable;
	}

	/* If the kernel debugger is active, always disable compression */
	if (in_dbg_master())
		goto out_disable;

	/* If the scanout has not changed, don't modify the FBC settings.
	 * Note that we make the fundamental assumption that the fb->obj
	 * cannot be unpinned (and have its GTT offset and fence revoked)
	 * without first being decoupled from the scanout and FBC disabled.
	 */
	if (dev_priv->cfb_plane == intel_crtc->plane &&
	    dev_priv->cfb_fb == fb->base.id &&
	    dev_priv->cfb_y == crtc->y)
		return;

	if (intel_fbc_enabled(dev)) {
		/* We update FBC along two paths, after changing fb/crtc
		 * configuration (modeswitching) and after page-flipping
		 * finishes. For the latter, we know that not only did
		 * we disable the FBC at the start of the page-flip
		 * sequence, but also more than one vblank has passed.
		 *
		 * For the former case of modeswitching, it is possible
		 * to switch between two FBC valid configurations
		 * instantaneously so we do need to disable the FBC
		 * before we can modify its control registers. We also
		 * have to wait for the next vblank for that to take
		 * effect. However, since we delay enabling FBC we can
		 * assume that a vblank has passed since disabling and
		 * that we can safely alter the registers in the deferred
		 * callback.
		 *
		 * In the scenario that we go from a valid to invalid
		 * and then back to valid FBC configuration we have
		 * no strict enforcement that a vblank occurred since
		 * disabling the FBC. However, along all current pipe
		 * disabling paths we do need to wait for a vblank at
		 * some point. And we wait before enabling FBC anyway.
		 */
		DRM_DEBUG_KMS("disabling active FBC for update\n");
		intel_disable_fbc(dev);
	}

	intel_enable_fbc(crtc, 500);
	return;

out_disable:
	/* Multiple disables should be harmless */
	if (intel_fbc_enabled(dev)) {
		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
		intel_disable_fbc(dev);
	}
}
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->c_m = 1;
	} else {
		dev_priv->c_m = 2;
	}
}
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
							 int is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}
static void pineview_disable_cxsr(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* deactivate cxsr */
	I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
}
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value.  It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int latency_ns = 5000;
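/* Illustrative numbers (not from the original source): at a 148.5 MHz dot
 * clock and 4 bytes per pixel, 5 us of latency corresponds to roughly
 * 148500000 * 4 * 0.000005 = 2970 bytes, i.e. about 47 64-byte cachelines
 * that must still be queued in the FIFO when a fetch is issued.
 */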
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
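/* Example (illustrative, not from the original source): with
 * DSPARB = 0x3030 and DSPARB_CSTART_SHIFT = 7, plane A owns
 * 0x3030 & 0x7f = 0x30 (48) entries and plane B owns
 * ((0x3030 >> 7) & 0x7f) - 0x30 = 0x60 - 0x30 = 48 entries.
 */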
static int i85x_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	PINEVIEW_DISPLAY_FIFO,
	PINEVIEW_MAX_WM,
	PINEVIEW_DFT_WM,
	PINEVIEW_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
	PINEVIEW_DISPLAY_FIFO,
	PINEVIEW_MAX_WM,
	PINEVIEW_DFT_HPLLOFF_WM,
	PINEVIEW_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params pineview_cursor_wm = {
	PINEVIEW_CURSOR_FIFO,
	PINEVIEW_CURSOR_MAX_WM,
	PINEVIEW_CURSOR_DFT_WM,
	PINEVIEW_CURSOR_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	PINEVIEW_CURSOR_FIFO,
	PINEVIEW_CURSOR_MAX_WM,
	PINEVIEW_CURSOR_DFT_WM,
	PINEVIEW_CURSOR_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params g4x_wm_info = {
	G4X_FIFO_SIZE,
	G4X_MAX_WM,
	G4X_MAX_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
	I965_CURSOR_FIFO,
	I965_CURSOR_MAX_WM,
	I965_CURSOR_DFT_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_wm_info = {
	VALLEYVIEW_FIFO_SIZE,
	VALLEYVIEW_MAX_WM,
	VALLEYVIEW_MAX_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_cursor_wm_info = {
	I965_CURSOR_FIFO,
	VALLEYVIEW_CURSOR_MAX_WM,
	I965_CURSOR_DFT_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
	I965_CURSOR_FIFO,
	I965_CURSOR_MAX_WM,
	I965_CURSOR_DFT_WM,
	2,
	I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
	I945_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i915_wm_info = {
	I915_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i855_wm_info = {
	I855GM_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I830_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i830_wm_info = {
	I830_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I830_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_display_wm_info = {
	ILK_DISPLAY_FIFO,
	ILK_DISPLAY_MAXWM,
	ILK_DISPLAY_DFTWM,
	2,
	ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_cursor_wm_info = {
	ILK_CURSOR_FIFO,
	ILK_CURSOR_MAXWM,
	ILK_CURSOR_DFTWM,
	2,
	ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_display_srwm_info = {
	ILK_DISPLAY_SR_FIFO,
	ILK_DISPLAY_MAX_SRWM,
	ILK_DISPLAY_DFT_SRWM,
	2,
	ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_cursor_srwm_info = {
	ILK_CURSOR_SR_FIFO,
	ILK_CURSOR_MAX_SRWM,
	ILK_CURSOR_DFT_SRWM,
	2,
	ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_display_wm_info = {
	SNB_DISPLAY_FIFO,
	SNB_DISPLAY_MAXWM,
	SNB_DISPLAY_DFTWM,
	2,
	SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_cursor_wm_info = {
	SNB_CURSOR_FIFO,
	SNB_CURSOR_MAXWM,
	SNB_CURSOR_DFTWM,
	2,
	SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_display_srwm_info = {
	SNB_DISPLAY_SR_FIFO,
	SNB_DISPLAY_MAX_SRWM,
	SNB_DISPLAY_DFT_SRWM,
	2,
	SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
	SNB_CURSOR_SR_FIFO,
	SNB_CURSOR_MAX_SRWM,
	SNB_CURSOR_DFT_SRWM,
	2,
	SNB_FIFO_LINE_SIZE
};
/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @pixel_size: display pixel size
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO line-sized chunks from memory until the FIFO fills
 * past the watermark point. If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size,
					int pixel_size,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;
	return wm_size;
}
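/* Worked example (illustrative, not from the original source): for a
 * 100000 kHz (100 MHz) pixel clock, 4 bytes per pixel and 5000 ns latency,
 *   entries_required = (100000/1000) * 4 * 5000 / 1000 = 2000 bytes,
 * which DIV_ROUND_UP() turns into 32 entries of a 64-byte-cacheline FIFO.
 * With a 96-entry FIFO and a guard size of 2, the watermark would then be
 * 96 - (32 + 2) = 62 entries.
 */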
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
	struct drm_crtc *crtc, *enabled = NULL;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		if (crtc->enabled && crtc->fb) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}
static void pineview_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		pineview_disable_cxsr(dev);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		int clock = crtc->mode.clock;
		int pixel_size = crtc->fb->bits_per_pixel / 8;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= wm << DSPFW_SR_SHIFT;
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= wm & DSPFW_HPLL_SR_MASK;
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		/* activate cxsr */
		I915_WRITE(DSPFW3,
			   I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
		DRM_DEBUG_KMS("Self-refresh is enabled\n");
	} else {
		pineview_disable_cxsr(dev);
		DRM_DEBUG_KMS("Self-refresh is disabled\n");
	}
}
static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	int htotal, hdisplay, clock, pixel_size;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (crtc->fb == NULL || !crtc->enabled) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	htotal = crtc->mode.htotal;
	hdisplay = crtc->mode.hdisplay;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = ((htotal * 1000) / clock);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * 64 * pixel_size;
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}
/*
 * Check the wm result.
 *
 * If any calculated watermark values is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_device *dev,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}
static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	int hdisplay, htotal, pixel_size, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	hdisplay = crtc->mode.hdisplay;
	htotal = crtc->mode.htotal;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	line_time_us = (htotal * 1000) / clock;
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * 64;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}
static bool vlv_compute_drain_latency(struct drm_device *dev,
				      int plane,
				      int *plane_prec_mult,
				      int *plane_dl,
				      int *cursor_prec_mult,
				      int *cursor_dl)
{
	struct drm_crtc *crtc;
	int clock, pixel_size;
	int entries;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (crtc->fb == NULL || !crtc->enabled)
		return false;

	clock = crtc->mode.clock;	/* VESA DOT Clock */
	pixel_size = crtc->fb->bits_per_pixel / 8;	/* BPP */

	entries = (clock / 1000) * pixel_size;
	*plane_prec_mult = (entries > 256) ?
		DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
	*plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) *
						     pixel_size);

	entries = (clock / 1000) * 4;	/* BPP is always 4 for cursor */
	*cursor_prec_mult = (entries > 256) ?
		DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
	*cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4);

	return true;
}
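/* Illustrative numbers (not from the original source): with a 148500 kHz
 * clock and 4 bytes per pixel, entries = 148 * 4 = 592 > 256, so the
 * 32-entry precision multiplier is selected and
 *   plane_dl = (64 * 32 * 4) / (148 * 4) = 13
 * drain-latency units are programmed for the plane.
 */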
/*
 * Update drain latency registers of memory arbiter
 *
 * Valleyview SoC has a new memory arbiter and needs drain latency registers
 * to be programmed. Each plane has a drain latency multiplier and a drain
 * latency value.
 */

static void vlv_update_drain_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_prec, planea_dl, planeb_prec, planeb_dl;
	int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl;
	int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is
						  either 16 or 32 */

	/* For plane A, Cursor A */
	if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
				      &cursor_prec_mult, &cursora_dl)) {
		cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16;
		planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16;

		I915_WRITE(VLV_DDL1, cursora_prec |
				(cursora_dl << DDL_CURSORA_SHIFT) |
				planea_prec | planea_dl);
	}

	/* For plane B, Cursor B */
	if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
				      &cursor_prec_mult, &cursorb_dl)) {
		cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16;
		planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16;

		I915_WRITE(VLV_DDL2, cursorb_prec |
				(cursorb_dl << DDL_CURSORB_SHIFT) |
				planeb_prec | planeb_dl);
	}
}
#define single_plane_enabled(mask) is_power_of_2(mask)
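/* Illustrative note (not in the original): the mask has one bit per enabled
 * plane, so is_power_of_2() is true exactly when a single bit is set, e.g.
 * 0x1 or 0x2 qualify while 0x3 (both planes enabled) does not.
 */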
static void valleyview_update_wm(struct drm_device *dev)
{
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;

	vlv_update_drain_latency(dev);

	if (g4x_compute_wm0(dev, 0,
			    &valleyview_wm_info, latency_ns,
			    &valleyview_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1;

	if (g4x_compute_wm0(dev, 1,
			    &valleyview_wm_info, latency_ns,
			    &valleyview_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 2;

	plane_sr = cursor_sr = 0;
	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &plane_sr, &cursor_sr))
		I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
	else
		I915_WRITE(FW_BLC_SELF_VLV,
			   I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)));
}
static void g4x_update_wm(struct drm_device *dev)
{
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;

	if (g4x_compute_wm0(dev, 0,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1;

	if (g4x_compute_wm0(dev, 1,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 2;

	plane_sr = cursor_sr = 0;
	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	else
		I915_WRITE(FW_BLC_SELF,
			   I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
static void i965_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		int clock = crtc->mode.clock;
		int htotal = crtc->mode.htotal;
		int hdisplay = crtc->mode.hdisplay;
		int pixel_size = crtc->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = ((htotal * 1000) / clock);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * 64;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	} else {
		/* Turn off self refresh if both pipes are enabled */
		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
				   & ~FW_BLC_SELF_EN);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
		   (8 << 16) | (8 << 8) | (8 << 0));
	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
static void i9xx_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct drm_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i855_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (crtc->enabled && crtc->fb) {
		planea_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size,
					       crtc->fb->bits_per_pixel / 8,
					       latency_ns);
		enabled = crtc;
	} else
		planea_wm = fifo_size - wm_info->guard_size;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (crtc->enabled && crtc->fb) {
		planeb_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size,
					       crtc->fb->bits_per_pixel / 8,
					       latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else
		planeb_wm = fifo_size - wm_info->guard_size;

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	if (IS_I945G(dev) || IS_I945GM(dev))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
	else if (IS_I915GM(dev))
		I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		int clock = enabled->mode.clock;
		int htotal = enabled->mode.htotal;
		int hdisplay = enabled->mode.hdisplay;
		int pixel_size = enabled->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = (htotal * 1000) / clock;

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (HAS_FW_BLC(dev)) {
		if (enabled) {
			if (IS_I945G(dev) || IS_I945GM(dev))
				I915_WRITE(FW_BLC_SELF,
					   FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
			else if (IS_I915GM(dev))
				I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
			DRM_DEBUG_KMS("memory self refresh enabled\n");
		} else
			DRM_DEBUG_KMS("memory self refresh disabled\n");
	}
}
static void i830_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	uint32_t fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev);
	if (crtc == NULL)
		return;

	planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
				       dev_priv->display.get_fifo_size(dev, 0),
				       crtc->fb->bits_per_pixel / 8,
				       latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}
#define ILK_LP0_PLANE_LATENCY		700
#define ILK_LP0_CURSOR_LATENCY		1300
/*
 * Check the wm result.
 *
 * If any calculated watermark values is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool ironlake_check_srwm(struct drm_device *dev, int level,
				int fbc_wm, int display_wm, int cursor_wm,
				const struct intel_watermark_params *display,
				const struct intel_watermark_params *cursor)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
		      " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);

	if (fbc_wm > SNB_FBC_MAX_SRWM) {
		DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
			      fbc_wm, SNB_FBC_MAX_SRWM, level);

		/* fbc has its own way to disable FBC WM */
		I915_WRITE(DISP_ARB_CTL,
			   I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
		return false;
	}

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
			      display_wm, SNB_DISPLAY_MAX_SRWM, level);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
			      cursor_wm, SNB_CURSOR_MAX_SRWM, level);
		return false;
	}

	if (!(fbc_wm || display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
		return false;
	}

	return true;
}
/*
 * Compute watermark values of WM[1-3],
 */
static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
				  int latency_ns,
				  const struct intel_watermark_params *display,
				  const struct intel_watermark_params *cursor,
				  int *fbc_wm, int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	unsigned long line_time_us;
	int hdisplay, htotal, pixel_size, clock;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*fbc_wm = *display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	hdisplay = crtc->mode.hdisplay;
	htotal = crtc->mode.htotal;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	line_time_us = (htotal * 1000) / clock;
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/*
	 * Spec says:
	 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
	 */
	*fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * 64;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return ironlake_check_srwm(dev, level,
				   *fbc_wm, *display_wm, *cursor_wm,
				   display, cursor);
}
static void ironlake_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int fbc_wm, plane_wm, cursor_wm;
	unsigned int enabled;

	enabled = 0;
	if (g4x_compute_wm0(dev, 0,
			    &ironlake_display_wm_info,
			    ILK_LP0_PLANE_LATENCY,
			    &ironlake_cursor_wm_info,
			    ILK_LP0_CURSOR_LATENCY,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEA_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
			      " plane %d, " "cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 1;
	}

	if (g4x_compute_wm0(dev, 1,
			    &ironlake_display_wm_info,
			    ILK_LP0_PLANE_LATENCY,
			    &ironlake_cursor_wm_info,
			    ILK_LP0_CURSOR_LATENCY,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEB_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 2;
	}

	/*
	 * Calculate and update the self-refresh watermark only when one
	 * display plane is used.
	 */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	if (!single_plane_enabled(enabled))
		return;
	enabled = ffs(enabled) - 1;

	/* WM1 */
	if (!ironlake_compute_srwm(dev, 1, enabled,
				   ILK_READ_WM1_LATENCY() * 500,
				   &ironlake_display_srwm_info,
				   &ironlake_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM1_LP_ILK,
		   WM1_LP_SR_EN |
		   (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM2 */
	if (!ironlake_compute_srwm(dev, 2, enabled,
				   ILK_READ_WM2_LATENCY() * 500,
				   &ironlake_display_srwm_info,
				   &ironlake_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM2_LP_ILK,
		   WM2_LP_EN |
		   (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/*
	 * WM3 is unsupported on ILK, probably because we don't have latency
	 * data for that power state
	 */
}
static void sandybridge_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
	u32 val;
	int fbc_wm, plane_wm, cursor_wm;
	unsigned int enabled;

	enabled = 0;
	if (g4x_compute_wm0(dev, 0,
			    &sandybridge_display_wm_info, latency,
			    &sandybridge_cursor_wm_info, latency,
			    &plane_wm, &cursor_wm)) {
		val = I915_READ(WM0_PIPEA_ILK);
		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
		I915_WRITE(WM0_PIPEA_ILK, val |
			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
			      " plane %d, " "cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 1;
	}

	if (g4x_compute_wm0(dev, 1,
			    &sandybridge_display_wm_info, latency,
			    &sandybridge_cursor_wm_info, latency,
			    &plane_wm, &cursor_wm)) {
		val = I915_READ(WM0_PIPEB_ILK);
		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
		I915_WRITE(WM0_PIPEB_ILK, val |
			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 2;
	}

	if ((dev_priv->num_pipe == 3) &&
	    g4x_compute_wm0(dev, 2,
			    &sandybridge_display_wm_info, latency,
			    &sandybridge_cursor_wm_info, latency,
			    &plane_wm, &cursor_wm)) {
		val = I915_READ(WM0_PIPEC_IVB);
		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
		I915_WRITE(WM0_PIPEC_IVB, val |
			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
		DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 3;
	}

	/*
	 * Calculate and update the self-refresh watermark only when one
	 * display plane is used.
	 *
	 * SNB supports 3 levels of watermark.
	 *
	 * WM1/WM2/WM3 watermarks have to be enabled in the ascending order,
	 * and disabled in the descending order.
	 */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	if (!single_plane_enabled(enabled) ||
	    dev_priv->sprite_scaling_enabled)
		return;
	enabled = ffs(enabled) - 1;

	/* WM1 */
	if (!ironlake_compute_srwm(dev, 1, enabled,
				   SNB_READ_WM1_LATENCY() * 500,
				   &sandybridge_display_srwm_info,
				   &sandybridge_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM1_LP_ILK,
		   WM1_LP_SR_EN |
		   (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM2 */
	if (!ironlake_compute_srwm(dev, 2, enabled,
				   SNB_READ_WM2_LATENCY() * 500,
				   &sandybridge_display_srwm_info,
				   &sandybridge_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM2_LP_ILK,
		   WM2_LP_EN |
		   (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM3 */
	if (!ironlake_compute_srwm(dev, 3, enabled,
				   SNB_READ_WM3_LATENCY() * 500,
				   &sandybridge_display_srwm_info,
				   &sandybridge_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM3_LP_ILK,
		   WM3_LP_EN |
		   (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);
}
static void
haswell_update_linetime_wm(struct drm_device *dev, int pipe,
			   struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 temp;

	temp = I915_READ(PIPE_WM_LINETIME(pipe));
	temp &= ~PIPE_WM_LINETIME_MASK;

	/* The watermarks are computed based on how long it takes to fill a
	 * single row at the given clock rate, multiplied by 8.
	 */
	temp |= PIPE_WM_LINETIME_TIME(
		((mode->crtc_hdisplay * 1000) / mode->clock) * 8);

	/* IPS watermarks are only used by pipe A, and are ignored by
	 * pipes B and C. They are calculated similarly to the common
	 * linetime values, except that we are using CD clock frequency
	 * in MHz instead of pixel rate for the division.
	 *
	 * This is a placeholder for the IPS watermark calculation code.
	 */

	I915_WRITE(PIPE_WM_LINETIME(pipe), temp);
}
static bool
sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
			      uint32_t sprite_width, int pixel_size,
			      const struct intel_watermark_params *display,
			      int display_latency_ns, int *sprite_wm)
{
	struct drm_crtc *crtc;
	int clock;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (crtc->fb == NULL || !crtc->enabled) {
		*sprite_wm = display->guard_size;
		return false;
	}

	clock = crtc->mode.clock;

	/* Use the small buffer method to calculate the sprite watermark */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size -
		sprite_width * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*sprite_wm = entries + display->guard_size;
	if (*sprite_wm > (int)display->max_wm)
		*sprite_wm = display->max_wm;

	return true;
}
static bool
sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
				uint32_t sprite_width, int pixel_size,
				const struct intel_watermark_params *display,
				int latency_ns, int *sprite_wm)
{
	struct drm_crtc *crtc;
	unsigned long line_time_us;
	int clock;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*sprite_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	clock = crtc->mode.clock;
	if (!clock) {
		*sprite_wm = 0;
		return false;
	}

	line_time_us = (sprite_width * 1000) / clock;
	if (!line_time_us) {
		*sprite_wm = 0;
		return false;
	}

	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = sprite_width * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*sprite_wm = entries + display->guard_size;

	return *sprite_wm > 0x3ff ? false : true;
}
static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
					 uint32_t sprite_width, int pixel_size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
	u32 val;
	int sprite_wm, reg;
	int ret;

	switch (pipe) {
	case 0:
		reg = WM0_PIPEA_ILK;
		break;
	case 1:
		reg = WM0_PIPEB_ILK;
		break;
	case 2:
		reg = WM0_PIPEC_IVB;
		break;
	default:
		return; /* bad pipe */
	}

	ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
					    &sandybridge_display_wm_info,
					    latency, &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
			      pipe);
		return;
	}

	val = I915_READ(reg);
	val &= ~WM0_PIPE_SPRITE_MASK;
	I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
	DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);


	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
					      pixel_size,
					      &sandybridge_display_srwm_info,
					      SNB_READ_WM1_LATENCY() * 500,
					      &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
			      pipe);
		return;
	}
	I915_WRITE(WM1S_LP_ILK, sprite_wm);

	/* Only IVB has two more LP watermarks for sprite */
	if (!IS_IVYBRIDGE(dev))
		return;

	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
					      pixel_size,
					      &sandybridge_display_srwm_info,
					      SNB_READ_WM2_LATENCY() * 500,
					      &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
			      pipe);
		return;
	}
	I915_WRITE(WM2S_LP_IVB, sprite_wm);

	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
					      pixel_size,
					      &sandybridge_display_srwm_info,
					      SNB_READ_WM3_LATENCY() * 500,
					      &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
			      pipe);
		return;
	}
	I915_WRITE(WM3S_LP_IVB, sprite_wm);
}
/**
 * intel_update_watermarks - update FIFO watermark values based on current modes
 *
 * Calculate watermark values for the various WM regs based on current mode
 * and plane configuration.
 *
 * There are several cases to deal with here:
 *   - normal (i.e. non-self-refresh)
 *   - self-refresh (SR) mode
 *   - lines are large relative to FIFO size (buffer can hold up to 2)
 *   - lines are small relative to FIFO size (buffer can hold more than 2
 *     lines), so need to account for TLB latency
 *
 *   The normal calculation is:
 *     watermark = dotclock * bytes per pixel * latency
 *   where latency is platform & configuration dependent (we assume pessimal
 *   values here).
 *
 *   The SR calculation is:
 *     watermark = (trunc(latency/line time)+1) * surface width *
 *       bytes per pixel
 *   where
 *     line time = htotal / dotclock
 *     surface width = hdisplay for normal plane and 64 for cursor
 *   and latency is assumed to be high, as above.
 *
 * The final value programmed to the register should always be rounded up,
 * and include an extra 2 entries to account for clock crossings.
 *
 * We don't use the sprite, so we can ignore that.  And on Crestline we have
 * to set the non-SR watermarks to 8.
 */
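/*
 * Worked example of the normal method, with invented numbers (no real
 * platform implied): a 100 MHz dotclock (clock = 100000 kHz), 4 bytes per
 * pixel and 10000 ns of latency give
 *   entries = ((100000 * 4 / 1000) * 10000) / 1000 = 4000 bytes,
 * which a 64-byte cacheline turns into DIV_ROUND_UP(4000, 64) = 63 FIFO
 * entries before the guard size is added, mirroring the arithmetic in the
 * *_compute_*wm() helpers above.
 */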
void intel_update_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->display.update_wm)
		dev_priv->display.update_wm(dev);
}
void intel_update_linetime_watermarks(struct drm_device *dev,
		int pipe, struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->display.update_linetime_wm)
		dev_priv->display.update_linetime_wm(dev, pipe, mode);
}
void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
				    uint32_t sprite_width, int pixel_size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->display.update_sprite_wm)
		dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
						   pixel_size);
}
static struct drm_i915_gem_object *
intel_alloc_context_page(struct drm_device *dev)
{
	struct drm_i915_gem_object *ctx;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	ctx = i915_gem_alloc_object(dev, 4096);
	if (!ctx) {
		DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
		return NULL;
	}

	ret = i915_gem_object_pin(ctx, 4096, true);
	if (ret) {
		DRM_ERROR("failed to pin power context: %d\n", ret);
		goto err_unref;
	}

	ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
	if (ret) {
		DRM_ERROR("failed to set-domain on power context: %d\n", ret);
		goto err_unpin;
	}

	return ctx;

err_unpin:
	i915_gem_object_unpin(ctx);
err_unref:
	drm_gem_object_unreference(&ctx->base);
	return NULL;
}
bool ironlake_set_drps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl;

	rgvswctl = I915_READ16(MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		DRM_DEBUG("gpu busy, RCS change rejected\n");
		return false; /* still busy with another command */
	}

	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
	I915_WRITE16(MEMSWCTL, rgvswctl);
	POSTING_READ16(MEMSWCTL);

	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE16(MEMSWCTL, rgvswctl);

	return true;
}
static void ironlake_enable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rgvmodectl = I915_READ(MEMMODECTL);
	u8 fmax, fmin, fstart, vstart;

	/* Enable temp reporting */
	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);

	/* 100ms RC evaluation intervals */
	I915_WRITE(RCUPEI, 100000);
	I915_WRITE(RCDNEI, 100000);

	/* Set max/min thresholds to 90ms and 80ms respectively */
	I915_WRITE(RCBMAXAVG, 90000);
	I915_WRITE(RCBMINAVG, 80000);

	I915_WRITE(MEMIHYST, 1);

	/* Set up min, max, and cur for interrupt handling */
	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;

	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
		PXVFREQ_PX_SHIFT;

	dev_priv->fmax = fmax; /* IPS callback will increase this */
	dev_priv->fstart = fstart;

	dev_priv->max_delay = fstart;
	dev_priv->min_delay = fmin;
	dev_priv->cur_delay = fstart;

	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
			 fmax, fmin, fstart);

	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

	/*
	 * Interrupts will be enabled in ironlake_irq_postinstall
	 */

	I915_WRITE(VIDSTART, vstart);
	POSTING_READ(VIDSTART);

	rgvmodectl |= MEMMODE_SWMODE_EN;
	I915_WRITE(MEMMODECTL, rgvmodectl);

	if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
		DRM_ERROR("stuck trying to change perf mode\n");
	msleep(1);

	ironlake_set_drps(dev, fstart);

	dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
		I915_READ(0x112e0);
	dev_priv->last_time1 = jiffies_to_msecs(jiffies);
	dev_priv->last_count2 = I915_READ(0x112f4);
	getrawmonotonic(&dev_priv->last_time2);
}
static void ironlake_disable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl = I915_READ16(MEMSWCTL);

	/* Ack interrupts, disable EFC interrupt */
	I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
	I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
	I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
	I915_WRITE(DEIIR, DE_PCU_EVENT);
	I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);

	/* Go back to the starting frequency */
	ironlake_set_drps(dev, dev_priv->fstart);
	msleep(1);
	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE(MEMSWCTL, rgvswctl);
	msleep(1);
}
void gen6_set_rps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 limits;

	limits = 0;
	if (val >= dev_priv->max_delay)
		val = dev_priv->max_delay;
	else
		limits |= dev_priv->max_delay << 24;

	if (val <= dev_priv->min_delay)
		val = dev_priv->min_delay;
	else
		limits |= dev_priv->min_delay << 16;

	if (val == dev_priv->cur_delay)
		return;

	I915_WRITE(GEN6_RPNSWREQ,
		   GEN6_FREQUENCY(val) |
		   GEN6_OFFSET(0) |
		   GEN6_AGGRESSIVE_TURBO);

	/* Make sure we continue to get interrupts
	 * until we hit the minimum or maximum frequencies.
	 */
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);

	dev_priv->cur_delay = val;
}
static void gen6_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_RC_CONTROL, 0);
	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
	I915_WRITE(GEN6_PMIER, 0);
	/* Complete PM interrupt masking here doesn't race with the rps work
	 * item again unmasking PM interrupts because that is using a different
	 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
	 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */

	spin_lock_irq(&dev_priv->rps_lock);
	dev_priv->pm_iir = 0;
	spin_unlock_irq(&dev_priv->rps_lock);

	I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
}
int intel_enable_rc6(const struct drm_device *dev)
{
	/*
	 * Respect the kernel parameter if it is set
	 */
	if (i915_enable_rc6 >= 0)
		return i915_enable_rc6;

	/*
	 * Disable RC6 on Ironlake
	 */
	if (INTEL_INFO(dev)->gen == 5)
		return 0;

	/* On Haswell, only RC6 is available. So let's enable it by default to
	 * provide better testing and coverage since the beginning.
	 */
	if (IS_HASWELL(dev))
		return INTEL_RC6_ENABLE;

	/*
	 * Disable deep RC6 on Sandybridge (plain RC6 stays enabled)
	 */
	if (INTEL_INFO(dev)->gen == 6) {
		DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
		return INTEL_RC6_ENABLE;
	}
	DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
	return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
}
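/*
 * Usage note (an aside, not from the original source): i915_enable_rc6 is
 * the module parameter consulted above, so booting with e.g.
 * i915.i915_enable_rc6=1 forces plain RC6 on, =0 forces all RC6 states off,
 * and any negative value falls through to the per-platform defaults.
 */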
static void gen6_enable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	u32 rp_state_cap;
	u32 gt_perf_status;
	u32 pcu_mbox, rc6_mask = 0;
	u32 gtfifodbg;
	int rc6_mode;
	int i;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Here begins a magic sequence of register writes to enable
	 * auto-downclocking.
	 *
	 * Perhaps there might be some value in exposing these to
	 * userspace...
	 */
	I915_WRITE(GEN6_RC_STATE, 0);

	/* Clear the DBG now so we don't confuse earlier errors */
	if ((gtfifodbg = I915_READ(GTFIFODBG))) {
		DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	gen6_gt_force_wake_get(dev_priv);

	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
	gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);

	/* In units of 100MHz */
	dev_priv->max_delay = rp_state_cap & 0xff;
	dev_priv->min_delay = (rp_state_cap & 0xff0000) >> 16;
	dev_priv->cur_delay = 0;

	/* disable the counters and set deterministic thresholds */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
	I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	for_each_ring(ring, dev_priv, i)
		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);

	I915_WRITE(GEN6_RC_SLEEP, 0);
	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
	I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
	I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */

	/* Check if we are enabling RC6 */
	rc6_mode = intel_enable_rc6(dev_priv->dev);
	if (rc6_mode & INTEL_RC6_ENABLE)
		rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;

	/* We don't use those on Haswell */
	if (!IS_HASWELL(dev)) {
		if (rc6_mode & INTEL_RC6p_ENABLE)
			rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;

		if (rc6_mode & INTEL_RC6pp_ENABLE)
			rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
	}

	DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
		 (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
		 (rc6_mask & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
		 (rc6_mask & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");

	I915_WRITE(GEN6_RC_CONTROL,
		   rc6_mask |
		   GEN6_RC_CTL_EI_MODE(1) |
		   GEN6_RC_CTL_HW_ENABLE);

	I915_WRITE(GEN6_RPNSWREQ,
		   GEN6_FREQUENCY(10) |
		   GEN6_OFFSET(0) |
		   GEN6_AGGRESSIVE_TURBO);
	I915_WRITE(GEN6_RC_VIDEO_FREQ,
		   GEN6_FREQUENCY(12));

	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
		   dev_priv->max_delay << 24 |
		   dev_priv->min_delay << 16);

	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
	I915_WRITE(GEN6_RP_UP_EI, 66000);
	I915_WRITE(GEN6_RP_DOWN_EI, 350000);

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   (IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT));

	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");

	I915_WRITE(GEN6_PCODE_DATA, 0);
	I915_WRITE(GEN6_PCODE_MAILBOX,
		   GEN6_PCODE_READY |
		   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");

	/* Check for overclock support */
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
	pcu_mbox = I915_READ(GEN6_PCODE_DATA);
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
	if (pcu_mbox & (1<<31)) { /* OC supported */
		dev_priv->max_delay = pcu_mbox & 0xff;
		DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
	}

	gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);

	/* requires MSI enabled */
	I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS);
	spin_lock_irq(&dev_priv->rps_lock);
	WARN_ON(dev_priv->pm_iir != 0);
	I915_WRITE(GEN6_PMIMR, 0);
	spin_unlock_irq(&dev_priv->rps_lock);
	/* enable all PM interrupts */
	I915_WRITE(GEN6_PMINTRMSK, 0);

	gen6_gt_force_wake_put(dev_priv);
}
static void gen6_update_ring_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int min_freq = 15;
	int gpu_freq, ia_freq, max_ia_freq;
	int scaling_factor = 180;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	max_ia_freq = cpufreq_quick_get_max(0);
	/*
	 * Default to measured freq if none found, PCU will ensure we don't go
	 * over
	 */
	if (!max_ia_freq)
		max_ia_freq = tsc_khz;

	/* Convert from kHz to MHz */
	max_ia_freq /= 1000;

	/*
	 * For each potential GPU frequency, load a ring frequency we'd like
	 * to use for memory access.  We do this by specifying the IA frequency
	 * the PCU should use as a reference to determine the ring frequency.
	 */
	for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
	     gpu_freq--) {
		int diff = dev_priv->max_delay - gpu_freq;

		/*
		 * For GPU frequencies less than 750MHz, just use the lowest
		 * ring freq.
		 */
		if (gpu_freq < min_freq)
			ia_freq = 800;
		else
			ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
		ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);

		I915_WRITE(GEN6_PCODE_DATA,
			   (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
			   gpu_freq);
		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
			   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
		if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
			      GEN6_PCODE_READY) == 0, 10)) {
			DRM_ERROR("pcode write of freq table timed out\n");
			continue;
		}
	}
}
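/*
 * Illustrative arithmetic for the table above (numbers invented, no real
 * platform implied): with max_ia_freq = 3400 MHz and scaling_factor = 180,
 * the entry two steps below the GPU maximum (diff = 2) requests
 * ia_freq = 3400 - (2 * 180) / 2 = 3220 MHz, which DIV_ROUND_CLOSEST()
 * turns into a ratio field of 32 in GEN6_PCODE_DATA.
 */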
void ironlake_teardown_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->renderctx) {
		i915_gem_object_unpin(dev_priv->renderctx);
		drm_gem_object_unreference(&dev_priv->renderctx->base);
		dev_priv->renderctx = NULL;
	}

	if (dev_priv->pwrctx) {
		i915_gem_object_unpin(dev_priv->pwrctx);
		drm_gem_object_unreference(&dev_priv->pwrctx->base);
		dev_priv->pwrctx = NULL;
	}
}
static void ironlake_disable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (I915_READ(PWRCTXA)) {
		/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
		wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
			 50);

		I915_WRITE(PWRCTXA, 0);
		POSTING_READ(PWRCTXA);

		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
		POSTING_READ(RSTDBYCTL);
	}
}
static int ironlake_setup_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->renderctx == NULL)
		dev_priv->renderctx = intel_alloc_context_page(dev);
	if (!dev_priv->renderctx)
		return -ENOMEM;

	if (dev_priv->pwrctx == NULL)
		dev_priv->pwrctx = intel_alloc_context_page(dev);
	if (!dev_priv->pwrctx) {
		ironlake_teardown_rc6(dev);
		return -ENOMEM;
	}

	return 0;
}
static void ironlake_enable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
	int ret;

	/* rc6 disabled by default due to repeated reports of hanging during
	 * boot and resume.
	 */
	if (!intel_enable_rc6(dev))
		return;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	ret = ironlake_setup_rc6(dev);
	if (ret)
		return;

	/*
	 * GPU can automatically power down the render unit if given a page
	 * to save state.
	 */
	ret = intel_ring_begin(ring, 6);
	if (ret) {
		ironlake_teardown_rc6(dev);
		return;
	}

	intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
	intel_ring_emit(ring, MI_SET_CONTEXT);
	intel_ring_emit(ring, dev_priv->renderctx->gtt_offset |
			MI_MM_SPACE_GTT |
			MI_SAVE_EXT_STATE_EN |
			MI_RESTORE_EXT_STATE_EN |
			MI_RESTORE_INHIBIT);
	intel_ring_emit(ring, MI_SUSPEND_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_advance(ring);

	/*
	 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
	 * does an implicit flush, combined with MI_FLUSH above, it should be
	 * safe to assume that renderctx is valid
	 */
	ret = intel_wait_ring_idle(ring);
	if (ret) {
		DRM_ERROR("failed to enable ironlake power savings\n");
		ironlake_teardown_rc6(dev);
		return;
	}

	I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
}
static unsigned long intel_pxfreq(u32 vidfreq)
{
	unsigned long freq;
	int div = (vidfreq & 0x3f0000) >> 16;
	int post = (vidfreq & 0x3000) >> 12;
	int pre = (vidfreq & 0x7);

	if (!pre)
		return 0;

	freq = ((div * 133333) / ((1<<post) * pre));

	return freq;
}
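/*
 * Example decode (bit values invented for illustration): div = 20, post = 1
 * and pre = 2 give freq = (20 * 133333) / ((1 << 1) * 2) = 666665, i.e.
 * roughly 667 MHz in the kHz units the callers below divide by 1000.
 */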
static const struct cparams {
	u16 i;
	u16 t;
	u16 m;
	u16 c;
} cparams[] = {
	{ 1, 1333, 301, 28664 },
	{ 1, 1066, 294, 24460 },
	{ 1, 800, 294, 25192 },
	{ 0, 1333, 276, 27605 },
	{ 0, 1066, 276, 27605 },
	{ 0, 800, 231, 23784 },
};
unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
	u64 total_count, diff, ret;
	u32 count1, count2, count3, m = 0, c = 0;
	unsigned long now = jiffies_to_msecs(jiffies), diff1;
	int i;

	diff1 = now - dev_priv->last_time1;

	/* Prevent division-by-zero if we are asking too fast.
	 * Also, we don't get interesting results if we are polling
	 * faster than once in 10ms, so just return the saved value
	 * in such cases.
	 */
	if (diff1 <= 10)
		return dev_priv->chipset_power;

	count1 = I915_READ(DMIEC);
	count2 = I915_READ(DDREC);
	count3 = I915_READ(CSIEC);

	total_count = count1 + count2 + count3;

	/* FIXME: handle per-counter overflow */
	if (total_count < dev_priv->last_count1) {
		diff = ~0UL - dev_priv->last_count1;
		diff += total_count;
	} else {
		diff = total_count - dev_priv->last_count1;
	}

	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
		if (cparams[i].i == dev_priv->c_m &&
		    cparams[i].t == dev_priv->r_t) {
			m = cparams[i].m;
			c = cparams[i].c;
			break;
		}
	}

	diff = div_u64(diff, diff1);
	ret = ((m * diff) + c);
	ret = div_u64(ret, 10);

	dev_priv->last_count1 = total_count;
	dev_priv->last_time1 = now;

	dev_priv->chipset_power = ret;

	return ret;
}
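/*
 * Illustrative numbers for the conversion above: with the first cparams
 * row (m = 301, c = 28664) and a counter delta of 50000 over diff1 = 100 ms,
 * diff becomes 500 counts/ms and ret = (301 * 500 + 28664) / 10 = 17916 in
 * the driver's raw power units.
 */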
unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
{
	unsigned long m, x, b;
	u32 tsfs;

	tsfs = I915_READ(TSFS);

	m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
	x = I915_READ8(TR1);

	b = tsfs & TSFS_INTR_MASK;

	return ((m * x) / 127) - b;
}
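/*
 * Sketch of the slope/intercept form above (values invented): m = 120,
 * x = 100 and b = 50 give (120 * 100) / 127 - 50 = 44; the exact unit is
 * hardware-defined and only needs to be consistent for the IPS heuristics.
 */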
static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
	static const struct v_table {
		u16 vd; /* in .1 mil */
		u16 vm; /* in .1 mil */
	} v_table[] = {
		/* table entries elided in this copy of the source */
	};

	if (dev_priv->info->is_mobile)
		return v_table[pxvid].vm;
	else
		return v_table[pxvid].vd;
}
void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	struct timespec now, diff1;
	u64 diff;
	unsigned long diffms;
	u32 count;

	if (dev_priv->info->gen != 5)
		return;

	getrawmonotonic(&now);
	diff1 = timespec_sub(now, dev_priv->last_time2);

	/* Don't divide by 0 */
	diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
	if (!diffms)
		return;

	count = I915_READ(GFXEC);

	if (count < dev_priv->last_count2) {
		diff = ~0UL - dev_priv->last_count2;
		diff += count;
	} else {
		diff = count - dev_priv->last_count2;
	}

	dev_priv->last_count2 = count;
	dev_priv->last_time2 = now;

	/* More magic constants... */
	diff = diff * 1181;
	diff = div_u64(diff, diffms * 10);
	dev_priv->gfx_power = diff;
}
unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
	unsigned long t, corr, state1, corr2, state2;
	u32 pxvid, ext_v;

	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
	pxvid = (pxvid >> 24) & 0x7f;
	ext_v = pvid_to_extvid(dev_priv, pxvid);

	state1 = ext_v;

	t = i915_mch_val(dev_priv);

	/* Revel in the empirically derived constants */

	/* Correction factor in 1/100000 units */
	if (t > 80)
		corr = ((t * 2349) + 135940);
	else if (t >= 50)
		corr = ((t * 964) + 29317);
	else /* < 50 */
		corr = ((t * 301) + 1004);

	corr = corr * ((150142 * state1) / 10000 - 78642);
	corr /= 100000;
	corr2 = (corr * dev_priv->corr);

	state2 = (corr2 * state1) / 10000;
	state2 /= 100; /* convert to mW */

	i915_update_gfx_val(dev_priv);

	return dev_priv->gfx_power + state2;
}
/* Global for IPS driver to get at the current i915 device */
static struct drm_i915_private *i915_mch_dev;
/*
 * Lock protecting IPS related data structures
 *   - i915_mch_dev
 *   - dev_priv->max_delay
 *   - dev_priv->min_delay
 *   - dev_priv->fmax
 *   - dev_priv->gpu_busy
 */
static DEFINE_SPINLOCK(mchdev_lock);
/**
 * i915_read_mch_val - return value for IPS use
 *
 * Calculate and return a value for the IPS driver to use when deciding whether
 * we have thermal and power headroom to increase CPU or GPU power budget.
 */
unsigned long i915_read_mch_val(void)
{
	struct drm_i915_private *dev_priv;
	unsigned long chipset_val, graphics_val, ret = 0;

	spin_lock(&mchdev_lock);
	if (!i915_mch_dev)
		goto out_unlock;
	dev_priv = i915_mch_dev;

	chipset_val = i915_chipset_val(dev_priv);
	graphics_val = i915_gfx_val(dev_priv);

	ret = chipset_val + graphics_val;

out_unlock:
	spin_unlock(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);
/**
 * i915_gpu_raise - raise GPU frequency limit
 *
 * Raise the limit; IPS indicates we have thermal headroom.
 */
bool i915_gpu_raise(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->max_delay > dev_priv->fmax)
		dev_priv->max_delay--;

out_unlock:
	spin_unlock(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);
/**
 * i915_gpu_lower - lower GPU frequency limit
 *
 * IPS indicates we're close to a thermal limit, so throttle back the GPU
 * frequency maximum.
 */
bool i915_gpu_lower(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->max_delay < dev_priv->min_delay)
		dev_priv->max_delay++;

out_unlock:
	spin_unlock(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);
/**
 * i915_gpu_busy - indicate GPU business to IPS
 *
 * Tell the IPS driver whether or not the GPU is busy.
 */
bool i915_gpu_busy(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = false;

	spin_lock(&mchdev_lock);
	if (!i915_mch_dev)
		goto out_unlock;
	dev_priv = i915_mch_dev;

	ret = dev_priv->busy;

out_unlock:
	spin_unlock(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);
/**
 * i915_gpu_turbo_disable - disable graphics turbo
 *
 * Disable graphics turbo by resetting the max frequency and setting the
 * current frequency to the default.
 */
bool i915_gpu_turbo_disable(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	dev_priv->max_delay = dev_priv->fstart;

	if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
		ret = false;

out_unlock:
	spin_unlock(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
/**
 * Tells the intel_ips driver that the i915 driver is now loaded, if
 * IPS got loaded first.
 *
 * This awkward dance is so that neither module has to depend on the
 * other in order for IPS to do the appropriate communication of
 * GPU turbo limits to i915.
 */
static void
ips_ping_for_i915_load(void)
{
	void (*link)(void);

	link = symbol_get(ips_link_to_i915_driver);
	if (link) {
		link();
		symbol_put(ips_link_to_i915_driver);
	}
}

void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
{
	spin_lock(&mchdev_lock);
	i915_mch_dev = dev_priv;
	dev_priv->mchdev_lock = &mchdev_lock;
	spin_unlock(&mchdev_lock);

	ips_ping_for_i915_load();
}
void intel_gpu_ips_teardown(void)
{
	spin_lock(&mchdev_lock);
	i915_mch_dev = NULL;
	spin_unlock(&mchdev_lock);
}
static void intel_init_emon(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 lcfuse;
	u8 pxw[16];
	int i;

	/* Disable to program */
	I915_WRITE(ECR, 0);
	POSTING_READ(ECR);

	/* Program energy weights for various events */
	I915_WRITE(SDEW, 0x15040d00);
	I915_WRITE(CSIEW0, 0x007f0000);
	I915_WRITE(CSIEW1, 0x1e220004);
	I915_WRITE(CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		I915_WRITE(PEW + (i * 4), 0);
	for (i = 0; i < 3; i++)
		I915_WRITE(DEW + (i * 4), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
		unsigned long freq = intel_pxfreq(pxvidfreq);
		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
			PXVFREQ_PX_SHIFT;
		unsigned long val;

		val = vid * vid;
		val *= (freq / 1000);
		val *= 255;
		val /= (127*127*900);
		if (val > 0xff)
			DRM_ERROR("bad pxval: %ld\n", val);
		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	for (i = 0; i < 4; i++) {
		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
		I915_WRITE(PXW + (i * 4), val);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	I915_WRITE(OGW0, 0);
	I915_WRITE(OGW1, 0);
	I915_WRITE(EG0, 0x00007f00);
	I915_WRITE(EG1, 0x0000000e);
	I915_WRITE(EG2, 0x000e0000);
	I915_WRITE(EG3, 0x68000300);
	I915_WRITE(EG4, 0x42000000);
	I915_WRITE(EG5, 0x00140031);
	I915_WRITE(EG6, 0);
	I915_WRITE(EG7, 0);

	for (i = 0; i < 8; i++)
		I915_WRITE(PXWL + (i * 4), 0);

	/* Enable PMON + select events */
	I915_WRITE(ECR, 0x80000019);

	lcfuse = I915_READ(LCFUSE02);

	dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
}
void intel_disable_gt_powersave(struct drm_device *dev)
{
	if (IS_IRONLAKE_M(dev)) {
		ironlake_disable_drps(dev);
		ironlake_disable_rc6(dev);
	} else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) {
		gen6_disable_rps(dev);
	}
}
void intel_enable_gt_powersave(struct drm_device *dev)
{
	if (IS_IRONLAKE_M(dev)) {
		ironlake_enable_drps(dev);
		ironlake_enable_rc6(dev);
		intel_init_emon(dev);
	} else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
		gen6_enable_rps(dev);
		gen6_update_ring_freq(dev);
	}
}
static void ironlake_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	/* Required for FBC */
	dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
		DPFCRUNIT_CLOCK_GATE_DISABLE |
		DPFDUNIT_CLOCK_GATE_DISABLE;
	/* Required for CxSR */
	dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_3DCGDIS0,
		   MARIUNIT_CLOCK_GATE_DISABLE |
		   SVSMUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(PCH_3DCGDIS1,
		   VFMUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	/*
	 * According to the spec the following bits should be set in
	 * order to enable memory self-refresh
	 * The bit 22/21 of 0x42004
	 * The bit 5 of 0x42020
	 * The bit 15 of 0x45000
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
	I915_WRITE(ILK_DSPCLK_GATE,
		   (I915_READ(ILK_DSPCLK_GATE) |
		    ILK_DPARB_CLK_GATE));
	I915_WRITE(DISP_ARB_CTL,
		   (I915_READ(DISP_ARB_CTL) |
		    DISP_FBC_WM_DIS));
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/*
	 * Based on the document from hardware guys the following bits
	 * should be set unconditionally in order to enable FBC.
	 * The bit 22 of 0x42000
	 * The bit 22 of 0x42004
	 * The bit 7,8,9 of 0x42020.
	 */
	if (IS_IRONLAKE_M(dev)) {
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
		I915_WRITE(ILK_DISPLAY_CHICKEN2,
			   I915_READ(ILK_DISPLAY_CHICKEN2) |
			   ILK_DPARB_GATE);
		I915_WRITE(ILK_DSPCLK_GATE,
			   I915_READ(ILK_DSPCLK_GATE) |
			   ILK_DPFC_DIS1 |
			   ILK_DPFC_DIS2 |
			   ILK_CLK_FBC);
	}

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);
	I915_WRITE(_3D_CHICKEN2,
		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
		   _3D_CHICKEN2_WM_READ_PIPELINED);
}
static void gen6_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));

	I915_WRITE(GEN6_UCGCTL1,
		   I915_READ(GEN6_UCGCTL1) |
		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
	 * gating disable must be set.  Failure to set it results in
	 * flickering pixels due to Z write ordering failures after
	 * some amount of runtime in the Mesa "fire" demo, and Unigine
	 * Sanctuary and Tropics, and apparently anything else with
	 * alpha test or pixel discard.
	 *
	 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we didn't debug actual testcases to find it out.
	 *
	 * Also apply WaDisableVDSUnitClockGating and
	 * WaDisableRCPBUnitClockGating.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/* Bspec says we need to always set all mask bits. */
	I915_WRITE(_3D_CHICKEN3, (0xFFFF << 16) |
		   _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL);

	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * The bit21 and bit22 of 0x42000
	 * The bit21 and bit22 of 0x42004
	 * The bit5 and bit7 of 0x42020
	 * The bit14 of 0x70180
	 * The bit14 of 0x71180
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE,
		   I915_READ(ILK_DSPCLK_GATE) |
		   ILK_DPARB_CLK_GATE |
		   ILK_DPFD_CLK_GATE);

	I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
		   GEN6_MBCTL_ENABLE_BOOT_FETCH);

	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}

	/* The default value should be 0x200 according to docs, but the two
	 * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */
	I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff));
	I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI));
}
static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
{
	uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);

	reg &= ~GEN7_FF_SCHED_MASK;
	reg |= GEN7_FF_TS_SCHED_HW;
	reg |= GEN7_FF_VS_SCHED_HW;
	reg |= GEN7_FF_DS_SCHED_HW;

	I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}
static void haswell_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);

	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
	I915_WRITE(GEN7_L3CNTLREG1,
		   GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
		   GEN7_WA_L3_CHICKEN_MODE);

	/* This is required by WaCatErrorRejectionIssue */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}

	gen7_setup_fixed_func_scheduler(dev_priv);

	/* WaDisable4x2SubspanOptimization */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/* XXX: This is a workaround for early silicon revisions and should be
	 * removed later.
	 */
	I915_WRITE(WM_DBG,
		   I915_READ(WM_DBG) |
		   WM_DBG_DISALLOW_MULTIPLE_LP |
		   WM_DBG_DISALLOW_SPRITE |
		   WM_DBG_DISALLOW_MAXFIFO);
}
static void ivybridge_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
	uint32_t snpcr;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);

	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
	I915_WRITE(GEN7_L3CNTLREG1,
		   GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
		   GEN7_WA_L3_CHICKEN_MODE);

	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
	 * gating disable must be set.  Failure to set it results in
	 * flickering pixels due to Z write ordering failures after
	 * some amount of runtime in the Mesa "fire" demo, and Unigine
	 * Sanctuary and Tropics, and apparently anything else with
	 * alpha test or pixel discard.
	 *
	 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we didn't debug actual testcases to find it out.
	 *
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/* This is required by WaCatErrorRejectionIssue */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}

	I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
		   GEN6_MBCTL_ENABLE_BOOT_FETCH);

	gen7_setup_fixed_func_scheduler(dev_priv);

	/* WaDisable4x2SubspanOptimization */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= GEN6_MBC_SNPCR_MED;
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
}
static void valleyview_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);

	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
	I915_WRITE(GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);

	/* This is required by WaCatErrorRejectionIssue */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
		   GEN6_MBCTL_ENABLE_BOOT_FETCH);

	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
	 * gating disable must be set.  Failure to set it results in
	 * flickering pixels due to Z write ordering failures after
	 * some amount of runtime in the Mesa "fire" demo, and Unigine
	 * Sanctuary and Tropics, and apparently anything else with
	 * alpha test or pixel discard.
	 *
	 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we didn't debug actual testcases to find it out.
	 *
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating workaround.
	 *
	 * Also apply WaDisableVDSUnitClockGating and
	 * WaDisableRCPBUnitClockGating.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
		   GEN7_TDLUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(GEN7_UCGCTL4, GEN7_L3BANK2X_CLOCK_GATE_DISABLE);

	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}

	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * On ValleyView, the GUnit needs to signal the GT
	 * when flip and other events complete.  So enable
	 * all the GUnit->GT interrupts here
	 */
	I915_WRITE(VLV_DPFLIPSTAT, PIPEB_LINE_COMPARE_INT_EN |
		   PIPEB_HLINE_INT_EN | PIPEB_VBLANK_INT_EN |
		   SPRITED_FLIPDONE_INT_EN | SPRITEC_FLIPDONE_INT_EN |
		   PLANEB_FLIPDONE_INT_EN | PIPEA_LINE_COMPARE_INT_EN |
		   PIPEA_HLINE_INT_EN | PIPEA_VBLANK_INT_EN |
		   SPRITEB_FLIPDONE_INT_EN | SPRITEA_FLIPDONE_INT_EN |
		   PLANEA_FLIPDONE_INT_EN);
}
static void g4x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate;

	I915_WRITE(RENCLK_GATE_D1, 0);
	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
		   GS_UNIT_CLOCK_GATE_DISABLE |
		   CL_UNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(RAMCLK_GATE_D, 0);
	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
		OVRUNIT_CLOCK_GATE_DISABLE |
		OVCUNIT_CLOCK_GATE_DISABLE;
	if (IS_GM45(dev))
		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
}
static void crestline_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	I915_WRITE16(DEUC, 0);
}
static void broadwater_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
}
static void gen3_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dstate = I915_READ(D_STATE);

	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
		DSTATE_DOT_CLOCK_GATING;
	I915_WRITE(D_STATE, dstate);

	if (IS_PINEVIEW(dev))
		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));

	/* IIR "flip pending" means done if this bit is set */
	I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
}
static void i85x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
}
static void i830_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
}
static void ibx_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
static void cpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
		   DPLS_EDP_PPS_FIX_DIS);
	/* Without this, mode sets may fail silently on FDI */
	for_each_pipe(pipe)
		I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
}
void intel_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->display.init_clock_gating(dev);

	if (dev_priv->display.init_pch_clock_gating)
		dev_priv->display.init_pch_clock_gating(dev);
}
static void gen6_sanitize_pm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 limits, delay, old;

	gen6_gt_force_wake_get(dev_priv);

	old = limits = I915_READ(GEN6_RP_INTERRUPT_LIMITS);
	/* Make sure we continue to get interrupts
	 * until we hit the minimum or maximum frequencies.
	 */
	limits &= ~(0x3f << 16 | 0x3f << 24);
	delay = dev_priv->cur_delay;
	if (delay < dev_priv->max_delay)
		limits |= (dev_priv->max_delay & 0x3f) << 24;
	if (delay > dev_priv->min_delay)
		limits |= (dev_priv->min_delay & 0x3f) << 16;

	if (old != limits) {
		/* Note that the known failure case is to read back 0. */
		DRM_DEBUG_DRIVER("Power management discrepancy: GEN6_RP_INTERRUPT_LIMITS "
				 "expected %08x, was %08x\n", limits, old);
		I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
	}

	gen6_gt_force_wake_put(dev_priv);
}
void intel_sanitize_pm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->display.sanitize_pm)
		dev_priv->display.sanitize_pm(dev);
}
/* Starting with Haswell, we have different power wells for
 * different parts of the GPU. This attempts to enable them all.
 */
void intel_init_power_wells(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long power_wells[] = {
		HSW_PWR_WELL_CTL1,
		HSW_PWR_WELL_CTL2,
		HSW_PWR_WELL_CTL4
	};
	int i;

	if (!IS_HASWELL(dev))
		return;

	mutex_lock(&dev->struct_mutex);

	for (i = 0; i < ARRAY_SIZE(power_wells); i++) {
		int well = I915_READ(power_wells[i]);

		if ((well & HSW_PWR_WELL_STATE) == 0) {
			I915_WRITE(power_wells[i], well & HSW_PWR_WELL_ENABLE);
			if (wait_for((I915_READ(power_wells[i]) & HSW_PWR_WELL_STATE), 20))
				DRM_ERROR("Error enabling power well %lx\n", power_wells[i]);
		}
	}

	mutex_unlock(&dev->struct_mutex);
}
/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (I915_HAS_FBC(dev)) {
		if (HAS_PCH_SPLIT(dev)) {
			dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
			dev_priv->display.enable_fbc = ironlake_enable_fbc;
			dev_priv->display.disable_fbc = ironlake_disable_fbc;
		} else if (IS_GM45(dev)) {
			dev_priv->display.fbc_enabled = g4x_fbc_enabled;
			dev_priv->display.enable_fbc = g4x_enable_fbc;
			dev_priv->display.disable_fbc = g4x_disable_fbc;
		} else if (IS_CRESTLINE(dev)) {
			dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
			dev_priv->display.enable_fbc = i8xx_enable_fbc;
			dev_priv->display.disable_fbc = i8xx_disable_fbc;
		}
		/* 855GM needs testing */
	}

	/* For cxsr */
	if (IS_PINEVIEW(dev))
		i915_pineview_get_mem_freq(dev);
	else if (IS_GEN5(dev))
		i915_ironlake_get_mem_freq(dev);

	/* For FIFO watermark updates */
	if (HAS_PCH_SPLIT(dev)) {
		if (HAS_PCH_IBX(dev))
			dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
		else if (HAS_PCH_CPT(dev))
			dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;

		if (IS_GEN5(dev)) {
			if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
				dev_priv->display.update_wm = ironlake_update_wm;
			else {
				DRM_DEBUG_KMS("Failed to get proper latency. "
					      "Disable CxSR\n");
				dev_priv->display.update_wm = NULL;
			}
			dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
		} else if (IS_GEN6(dev)) {
			if (SNB_READ_WM0_LATENCY()) {
				dev_priv->display.update_wm = sandybridge_update_wm;
				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
			} else {
				DRM_DEBUG_KMS("Failed to read display plane latency. "
					      "Disable CxSR\n");
				dev_priv->display.update_wm = NULL;
			}
			dev_priv->display.init_clock_gating = gen6_init_clock_gating;
			dev_priv->display.sanitize_pm = gen6_sanitize_pm;
		} else if (IS_IVYBRIDGE(dev)) {
			/* FIXME: detect B0+ stepping and use auto training */
			if (SNB_READ_WM0_LATENCY()) {
				dev_priv->display.update_wm = sandybridge_update_wm;
				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
			} else {
				DRM_DEBUG_KMS("Failed to read display plane latency. "
					      "Disable CxSR\n");
				dev_priv->display.update_wm = NULL;
			}
			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
			dev_priv->display.sanitize_pm = gen6_sanitize_pm;
		} else if (IS_HASWELL(dev)) {
			if (SNB_READ_WM0_LATENCY()) {
				dev_priv->display.update_wm = sandybridge_update_wm;
				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
				dev_priv->display.update_linetime_wm = haswell_update_linetime_wm;
			} else {
				DRM_DEBUG_KMS("Failed to read display plane latency. "
					      "Disable CxSR\n");
				dev_priv->display.update_wm = NULL;
			}
			dev_priv->display.init_clock_gating = haswell_init_clock_gating;
			dev_priv->display.sanitize_pm = gen6_sanitize_pm;
		} else
			dev_priv->display.update_wm = NULL;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->display.update_wm = valleyview_update_wm;
		dev_priv->display.init_clock_gating =
			valleyview_init_clock_gating;
	} else if (IS_PINEVIEW(dev)) {
		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
					    dev_priv->is_ddr3,
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			DRM_INFO("failed to find known CxSR latency "
				 "(found ddr%s fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
			/* Disable CxSR and never update its watermark again */
			pineview_disable_cxsr(dev);
			dev_priv->display.update_wm = NULL;
		} else
			dev_priv->display.update_wm = pineview_update_wm;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_G4X(dev)) {
		dev_priv->display.update_wm = g4x_update_wm;
		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
	} else if (IS_GEN4(dev)) {
		dev_priv->display.update_wm = i965_update_wm;
		if (IS_CRESTLINE(dev))
			dev_priv->display.init_clock_gating = crestline_init_clock_gating;
		else if (IS_BROADWATER(dev))
			dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
	} else if (IS_GEN3(dev)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_I865G(dev)) {
		dev_priv->display.update_wm = i830_update_wm;
		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
		dev_priv->display.get_fifo_size = i830_get_fifo_size;
	} else if (IS_I85X(dev)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i85x_get_fifo_size;
		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
	} else {
		dev_priv->display.update_wm = i830_update_wm;
		dev_priv->display.init_clock_gating = i830_init_clock_gating;
		if (IS_845G(dev))
			dev_priv->display.get_fifo_size = i845_get_fifo_size;
		else
			dev_priv->display.get_fifo_size = i830_get_fifo_size;
	}
}
static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	u32 gt_thread_status_mask;

	if (IS_HASWELL(dev_priv->dev))
		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
	else
		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;

	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((I915_READ_NOTRACE(GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}
static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
	u32 forcewake_ack;

	if (IS_HASWELL(dev_priv->dev))
		forcewake_ack = FORCEWAKE_ACK_HSW;
	else
		forcewake_ack = FORCEWAKE_ACK;

	if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1) == 0, 500))
		DRM_ERROR("Force wake wait timed out\n");

	I915_WRITE_NOTRACE(FORCEWAKE, 1);
	POSTING_READ(FORCEWAKE);

	if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500))
		DRM_ERROR("Force wake wait timed out\n");

	__gen6_gt_wait_for_thread_c0(dev_priv);
}
static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
{
	u32 forcewake_ack;

	if (IS_HASWELL(dev_priv->dev))
		forcewake_ack = FORCEWAKE_ACK_HSW;
	else
		forcewake_ack = FORCEWAKE_MT_ACK;

	if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1) == 0, 500))
		DRM_ERROR("Force wake wait timed out\n");

	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
	POSTING_READ(FORCEWAKE_MT);

	if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500))
		DRM_ERROR("Force wake wait timed out\n");

	__gen6_gt_wait_for_thread_c0(dev_priv);
}
/*
 * Generally this is called implicitly by the register read function. However,
 * if some sequence requires the GT to not power down then this function should
 * be called at the beginning of the sequence followed by a call to
 * gen6_gt_force_wake_put() at the end of the sequence.
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
	if (dev_priv->forcewake_count++ == 0)
		dev_priv->gt.force_wake_get(dev_priv);
	spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
}
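/*
 * Minimal usage sketch (SOME_GT_REGISTER is hypothetical, not a define from
 * this driver): a sequence that must not race with GT power-down brackets
 * its accesses in get/put, and the reference count above makes nesting safe.
 *
 *	gen6_gt_force_wake_get(dev_priv);
 *	val = I915_READ(SOME_GT_REGISTER);
 *	... further dependent GT accesses ...
 *	gen6_gt_force_wake_put(dev_priv);
 */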
void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
	if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
		 "MMIO read or write has been dropped %x\n", gtfifodbg))
		I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
}
static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
	I915_WRITE_NOTRACE(FORCEWAKE, 0);
	POSTING_READ(FORCEWAKE);
	gen6_gt_check_fifodbg(dev_priv);
}
static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{
	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
	POSTING_READ(FORCEWAKE_MT);
	gen6_gt_check_fifodbg(dev_priv);
}
/*
 * see gen6_gt_force_wake_get()
 */
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
	if (--dev_priv->forcewake_count == 0)
		dev_priv->gt.force_wake_put(dev_priv);
	spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
}
int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->gt_fifo_count = fifo;
	}
	dev_priv->gt_fifo_count--;

	return ret;
}
static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
{
	/* Already awake? */
	if ((I915_READ(0x130094) & 0xa1) == 0xa1)
		return;

	I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffffffff);
	POSTING_READ(FORCEWAKE_VLV);

	if (wait_for_atomic_us((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1), 500))
		DRM_ERROR("Force wake wait timed out\n");

	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
{
	I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffff0000);
	/* FIXME: confirm VLV behavior with Punit folks */
	POSTING_READ(FORCEWAKE_VLV);
}
void intel_gt_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_init(&dev_priv->gt_lock);

	if (IS_VALLEYVIEW(dev)) {
		dev_priv->gt.force_wake_get = vlv_force_wake_get;
		dev_priv->gt.force_wake_put = vlv_force_wake_put;
	} else if (INTEL_INFO(dev)->gen >= 6) {
		dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
		dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;

		/* IVB configs may use multi-threaded forcewake */
		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
			u32 ecobus;

			/* A small trick here - if the bios hasn't configured
			 * MT forcewake, and if the device is in RC6, then
			 * force_wake_mt_get will not wake the device and the
			 * ECOBUS read will return zero. Which will be
			 * (correctly) interpreted by the test below as MT
			 * forcewake being disabled.
			 */
			mutex_lock(&dev->struct_mutex);
			__gen6_gt_force_wake_mt_get(dev_priv);
			ecobus = I915_READ_NOTRACE(ECOBUS);
			__gen6_gt_force_wake_mt_put(dev_priv);
			mutex_unlock(&dev->struct_mutex);

			if (ecobus & FORCEWAKE_MT_ENABLE) {
				DRM_DEBUG_KMS("Using MT version of forcewake\n");
				dev_priv->gt.force_wake_get =
					__gen6_gt_force_wake_mt_get;
				dev_priv->gt.force_wake_put =
					__gen6_gt_force_wake_mt_put;
			}
		}
	}
}