/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/i915_drm.h>
#include "i915_trace.h"
#include "intel_drv.h"
/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */
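
/*
 * Hotplug detect (HPD) pin to interrupt-bit lookup tables. Each table below
 * is indexed by the logical HPD pin and gives the platform-specific hotplug
 * bit used in the corresponding enable/status register.
 */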
static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};
/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)
/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv, u32 reg)
{
	u32 val = I915_READ(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     reg, val);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
}
#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)
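
/*
 * The *_IRQ_INIT macros above are the postinstall-time counterparts of the
 * reset macros: they first check that IIR is already clear (via
 * gen5_assert_iir_is_zero()) and only then program IER followed by IMR.
 */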
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     uint32_t mask,
				     uint32_t bits)
{
	uint32_t val;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}
/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 *
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To avoid that read-modify-write cycles
 * interfere, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * held already, this function acquires the lock itself. A non-locking
 * version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   uint32_t mask,
				   uint32_t bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}
/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
				   uint32_t interrupt_mask,
				   uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask) {
		dev_priv->irq_mask = new_val;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	ilk_update_display_irq(dev_priv, mask, mask);
}

void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	ilk_update_display_irq(dev_priv, mask, 0);
}
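
/*
 * Illustrative use (not from this file): to unmask just the pipe A vblank
 * bit in DEIMR while leaving every other bit untouched, a caller would do
 *   ironlake_enable_display_irq(dev_priv, DE_PIPEA_VBLANK);
 * and ironlake_disable_display_irq() with the same mask re-masks that bit.
 */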
/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}
static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}
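
/*
 * On gen8+ the PM/RPS interrupt bits live in the third GT interrupt bank
 * (GEN8_GT_IIR/IMR/IER(2)); earlier gens have dedicated GEN6_PM* registers.
 * The helpers above hide that difference from the RPS code below.
 */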
/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}
void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
				  uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_disable_pm_irq(dev_priv, mask);
}
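
/*
 * Note: __gen6_disable_pm_irq() is gen6_disable_pm_irq() without the
 * intel_irqs_enabled() WARN; gen6_disable_rps_interrupts() below uses it
 * directly because it may run while interrupt support is being torn down.
 */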
void gen6_reset_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg = gen6_pm_iir(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	/* IIR can queue up two identical events, so clear it twice. */
	I915_WRITE(reg, dev_priv->pm_rps_events);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	POSTING_READ(reg);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}
void gen6_enable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);

	WARN_ON(dev_priv->rps.pm_iir);
	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
				dev_priv->pm_rps_events);
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}
u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
{
	/*
	 * SNB and IVB can, while VLV and CHV may, hard hang on a looping
	 * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
		mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_INFO(dev_priv)->gen >= 8)
		mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;

	return mask;
}
void gen6_disable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.interrupts_enabled = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	cancel_work_sync(&dev_priv->rps.work);

	spin_lock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));

	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
				~dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);

	synchronize_irq(dev->irq);
}
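
/*
 * Teardown ordering above: mark RPS interrupts disabled and flush any queued
 * work first, then mask and disable the interrupt sources, and finally
 * synchronize_irq() so a handler that raced with the disable has finished
 * before we return.
 */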
/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				uint32_t interrupt_mask,
				uint32_t enabled_irq_mask)
{
	uint32_t new_val;
	uint32_t old_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}
/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);

	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
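
/*
 * Illustrative use (not from this file): unmasking only the PCH GMBUS bit
 * would be ibx_display_interrupt_update(dev_priv, SDE_GMBUS, SDE_GMBUS);
 * passing 0 as the last argument masks the same bit again.
 */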
static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}
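
/*
 * PIPESTAT packs interrupt enable bits in the high 16 bits and the matching
 * status bits in the low 16 bits of the same register, which is why the
 * default enable mask above is simply status_mask << 16; VLV needs the
 * special cases handled in vlv_get_pipestat_enable_mask().
 */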
/*
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}
/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */
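
/*
 * Worked example (illustrative): the gen3/4 hardware frame counter increments
 * at the start of vertical active, so i915_get_vblank_counter() below adds
 * (pixel >= vbl_start) -- i.e. "has scanout already crossed into vblank?" --
 * to cook up a counter that appears to increment at the start of vblank
 * instead, which is what the vblank code expects.
 */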
static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
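
/*
 * g4x and newer parts have a dedicated hardware frame counter register per
 * pipe (PIPE_FRMCOUNT_G4X), so no cooking is needed there -- see
 * g4x_get_vblank_counter() below.
 */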
static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}
/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
723 static int __intel_get_crtc_scanline(struct intel_crtc
*crtc
)
725 struct drm_device
*dev
= crtc
->base
.dev
;
726 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
727 const struct drm_display_mode
*mode
= &crtc
->base
.hwmode
;
728 enum pipe pipe
= crtc
->pipe
;
729 int position
, vtotal
;
731 vtotal
= mode
->crtc_vtotal
;
732 if (mode
->flags
& DRM_MODE_FLAG_INTERLACE
)
736 position
= __raw_i915_read32(dev_priv
, PIPEDSL(pipe
)) & DSL_LINEMASK_GEN2
;
738 position
= __raw_i915_read32(dev_priv
, PIPEDSL(pipe
)) & DSL_LINEMASK_GEN3
;
	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank. So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem. We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
752 if (HAS_DDI(dev
) && !position
) {
755 for (i
= 0; i
< 100; i
++) {
757 temp
= __raw_i915_read32(dev_priv
, PIPEDSL(pipe
)) &
759 if (temp
!= position
) {
767 * See update_scanline_offset() for the details on the
768 * scanline_offset adjustment.
770 return (position
+ crtc
->scanline_offset
) % vtotal
;
773 static int i915_get_crtc_scanoutpos(struct drm_device
*dev
, unsigned int pipe
,
774 unsigned int flags
, int *vpos
, int *hpos
,
775 ktime_t
*stime
, ktime_t
*etime
,
776 const struct drm_display_mode
*mode
)
778 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
779 struct drm_crtc
*crtc
= dev_priv
->pipe_to_crtc_mapping
[pipe
];
780 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
782 int vbl_start
, vbl_end
, hsync_start
, htotal
, vtotal
;
785 unsigned long irqflags
;
787 if (WARN_ON(!mode
->crtc_clock
)) {
788 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
789 "pipe %c\n", pipe_name(pipe
));
793 htotal
= mode
->crtc_htotal
;
794 hsync_start
= mode
->crtc_hsync_start
;
795 vtotal
= mode
->crtc_vtotal
;
796 vbl_start
= mode
->crtc_vblank_start
;
797 vbl_end
= mode
->crtc_vblank_end
;
799 if (mode
->flags
& DRM_MODE_FLAG_INTERLACE
) {
800 vbl_start
= DIV_ROUND_UP(vbl_start
, 2);
805 ret
|= DRM_SCANOUTPOS_VALID
| DRM_SCANOUTPOS_ACCURATE
;
808 * Lock uncore.lock, as we will do multiple timing critical raw
809 * register reads, potentially with preemption disabled, so the
810 * following code must not block on uncore.lock.
812 spin_lock_irqsave(&dev_priv
->uncore
.lock
, irqflags
);
814 /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
816 /* Get optional system timestamp before query. */
818 *stime
= ktime_get();
820 if (IS_GEN2(dev
) || IS_G4X(dev
) || INTEL_INFO(dev
)->gen
>= 5) {
821 /* No obvious pixelcount register. Only query vertical
822 * scanout position from Display scan line register.
824 position
= __intel_get_crtc_scanline(intel_crtc
);
826 /* Have access to pixelcount since start of frame.
827 * We can split this into vertical and horizontal
830 position
= (__raw_i915_read32(dev_priv
, PIPEFRAMEPIXEL(pipe
)) & PIPE_PIXEL_MASK
) >> PIPE_PIXEL_SHIFT
;
832 /* convert to pixel counts */
838 * In interlaced modes, the pixel counter counts all pixels,
839 * so one field will have htotal more pixels. In order to avoid
840 * the reported position from jumping backwards when the pixel
841 * counter is beyond the length of the shorter field, just
842 * clamp the position the length of the shorter field. This
843 * matches how the scanline counter based position works since
844 * the scanline counter doesn't count the two half lines.
846 if (position
>= vtotal
)
847 position
= vtotal
- 1;
850 * Start of vblank interrupt is triggered at start of hsync,
851 * just prior to the first active line of vblank. However we
852 * consider lines to start at the leading edge of horizontal
853 * active. So, should we get here before we've crossed into
854 * the horizontal active of the first line in vblank, we would
855 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
856 * always add htotal-hsync_start to the current pixel position.
858 position
= (position
+ htotal
- hsync_start
) % vtotal
;
861 /* Get optional system timestamp after query. */
863 *etime
= ktime_get();
865 /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
867 spin_unlock_irqrestore(&dev_priv
->uncore
.lock
, irqflags
);
869 in_vbl
= position
>= vbl_start
&& position
< vbl_end
;
872 * While in vblank, position will be negative
873 * counting up towards 0 at vbl_end. And outside
874 * vblank, position will be positive counting
877 if (position
>= vbl_start
)
880 position
+= vtotal
- vbl_end
;
882 if (IS_GEN2(dev
) || IS_G4X(dev
) || INTEL_INFO(dev
)->gen
>= 5) {
886 *vpos
= position
/ htotal
;
887 *hpos
= position
- (*vpos
* htotal
);
892 ret
|= DRM_SCANOUTPOS_IN_VBLANK
;
897 int intel_get_crtc_scanline(struct intel_crtc
*crtc
)
899 struct drm_i915_private
*dev_priv
= crtc
->base
.dev
->dev_private
;
900 unsigned long irqflags
;
903 spin_lock_irqsave(&dev_priv
->uncore
.lock
, irqflags
);
904 position
= __intel_get_crtc_scanline(crtc
);
905 spin_unlock_irqrestore(&dev_priv
->uncore
.lock
, irqflags
);
910 static int i915_get_vblank_timestamp(struct drm_device
*dev
, unsigned int pipe
,
912 struct timeval
*vblank_time
,
915 struct drm_crtc
*crtc
;
917 if (pipe
>= INTEL_INFO(dev
)->num_pipes
) {
918 DRM_ERROR("Invalid crtc %u\n", pipe
);
922 /* Get drm_crtc to timestamp: */
923 crtc
= intel_get_crtc_for_pipe(dev
, pipe
);
925 DRM_ERROR("Invalid crtc %u\n", pipe
);
929 if (!crtc
->hwmode
.crtc_clock
) {
930 DRM_DEBUG_KMS("crtc %u is disabled\n", pipe
);
934 /* Helper routine in DRM core does all the work: */
935 return drm_calc_vbltimestamp_from_scanoutpos(dev
, pipe
, max_error
,
940 static void ironlake_rps_change_irq_handler(struct drm_device
*dev
)
942 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
943 u32 busy_up
, busy_down
, max_avg
, min_avg
;
946 spin_lock(&mchdev_lock
);
948 I915_WRITE16(MEMINTRSTS
, I915_READ(MEMINTRSTS
));
950 new_delay
= dev_priv
->ips
.cur_delay
;
952 I915_WRITE16(MEMINTRSTS
, MEMINT_EVAL_CHG
);
953 busy_up
= I915_READ(RCPREVBSYTUPAVG
);
954 busy_down
= I915_READ(RCPREVBSYTDNAVG
);
955 max_avg
= I915_READ(RCBMAXAVG
);
956 min_avg
= I915_READ(RCBMINAVG
);
958 /* Handle RCS change request from hw */
959 if (busy_up
> max_avg
) {
960 if (dev_priv
->ips
.cur_delay
!= dev_priv
->ips
.max_delay
)
961 new_delay
= dev_priv
->ips
.cur_delay
- 1;
962 if (new_delay
< dev_priv
->ips
.max_delay
)
963 new_delay
= dev_priv
->ips
.max_delay
;
964 } else if (busy_down
< min_avg
) {
965 if (dev_priv
->ips
.cur_delay
!= dev_priv
->ips
.min_delay
)
966 new_delay
= dev_priv
->ips
.cur_delay
+ 1;
967 if (new_delay
> dev_priv
->ips
.min_delay
)
968 new_delay
= dev_priv
->ips
.min_delay
;
971 if (ironlake_set_drps(dev
, new_delay
))
972 dev_priv
->ips
.cur_delay
= new_delay
;
974 spin_unlock(&mchdev_lock
);
979 static void notify_ring(struct intel_engine_cs
*ring
)
981 if (!intel_ring_initialized(ring
))
984 trace_i915_gem_request_notify(ring
);
986 wake_up_all(&ring
->irq_queue
);
989 static void vlv_c0_read(struct drm_i915_private
*dev_priv
,
990 struct intel_rps_ei
*ei
)
992 ei
->cz_clock
= vlv_punit_read(dev_priv
, PUNIT_REG_CZ_TIMESTAMP
);
993 ei
->render_c0
= I915_READ(VLV_RENDER_C0_COUNT
);
994 ei
->media_c0
= I915_READ(VLV_MEDIA_C0_COUNT
);
997 static bool vlv_c0_above(struct drm_i915_private
*dev_priv
,
998 const struct intel_rps_ei
*old
,
999 const struct intel_rps_ei
*now
,
1003 unsigned int mul
= 100;
1005 if (old
->cz_clock
== 0)
1008 if (I915_READ(VLV_COUNTER_CONTROL
) & VLV_COUNT_RANGE_HIGH
)
1011 time
= now
->cz_clock
- old
->cz_clock
;
1012 time
*= threshold
* dev_priv
->czclk_freq
;
1014 /* Workload can be split between render + media, e.g. SwapBuffers
1015 * being blitted in X after being rendered in mesa. To account for
1016 * this we need to combine both engines into our activity counter.
1018 c0
= now
->render_c0
- old
->render_c0
;
1019 c0
+= now
->media_c0
- old
->media_c0
;
1020 c0
*= mul
* VLV_CZ_CLOCK_TO_MILLI_SEC
;
1025 void gen6_rps_reset_ei(struct drm_i915_private
*dev_priv
)
1027 vlv_c0_read(dev_priv
, &dev_priv
->rps
.down_ei
);
1028 dev_priv
->rps
.up_ei
= dev_priv
->rps
.down_ei
;
1031 static u32
vlv_wa_c0_ei(struct drm_i915_private
*dev_priv
, u32 pm_iir
)
1033 struct intel_rps_ei now
;
1036 if ((pm_iir
& (GEN6_PM_RP_DOWN_EI_EXPIRED
| GEN6_PM_RP_UP_EI_EXPIRED
)) == 0)
1039 vlv_c0_read(dev_priv
, &now
);
1040 if (now
.cz_clock
== 0)
1043 if (pm_iir
& GEN6_PM_RP_DOWN_EI_EXPIRED
) {
1044 if (!vlv_c0_above(dev_priv
,
1045 &dev_priv
->rps
.down_ei
, &now
,
1046 dev_priv
->rps
.down_threshold
))
1047 events
|= GEN6_PM_RP_DOWN_THRESHOLD
;
1048 dev_priv
->rps
.down_ei
= now
;
1051 if (pm_iir
& GEN6_PM_RP_UP_EI_EXPIRED
) {
1052 if (vlv_c0_above(dev_priv
,
1053 &dev_priv
->rps
.up_ei
, &now
,
1054 dev_priv
->rps
.up_threshold
))
1055 events
|= GEN6_PM_RP_UP_THRESHOLD
;
1056 dev_priv
->rps
.up_ei
= now
;
1062 static bool any_waiters(struct drm_i915_private
*dev_priv
)
1064 struct intel_engine_cs
*ring
;
1067 for_each_ring(ring
, dev_priv
, i
)
1068 if (ring
->irq_refcount
)
1074 static void gen6_pm_rps_work(struct work_struct
*work
)
1076 struct drm_i915_private
*dev_priv
=
1077 container_of(work
, struct drm_i915_private
, rps
.work
);
1079 int new_delay
, adj
, min
, max
;
1082 spin_lock_irq(&dev_priv
->irq_lock
);
	/* Speed up work cancellation during disabling rps interrupts. */
1084 if (!dev_priv
->rps
.interrupts_enabled
) {
1085 spin_unlock_irq(&dev_priv
->irq_lock
);
1088 pm_iir
= dev_priv
->rps
.pm_iir
;
1089 dev_priv
->rps
.pm_iir
= 0;
1090 /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
1091 gen6_enable_pm_irq(dev_priv
, dev_priv
->pm_rps_events
);
1092 client_boost
= dev_priv
->rps
.client_boost
;
1093 dev_priv
->rps
.client_boost
= false;
1094 spin_unlock_irq(&dev_priv
->irq_lock
);
1096 /* Make sure we didn't queue anything we're not going to process. */
1097 WARN_ON(pm_iir
& ~dev_priv
->pm_rps_events
);
1099 if ((pm_iir
& dev_priv
->pm_rps_events
) == 0 && !client_boost
)
1102 mutex_lock(&dev_priv
->rps
.hw_lock
);
1104 pm_iir
|= vlv_wa_c0_ei(dev_priv
, pm_iir
);
1106 adj
= dev_priv
->rps
.last_adj
;
1107 new_delay
= dev_priv
->rps
.cur_freq
;
1108 min
= dev_priv
->rps
.min_freq_softlimit
;
1109 max
= dev_priv
->rps
.max_freq_softlimit
;
1112 new_delay
= dev_priv
->rps
.max_freq_softlimit
;
1114 } else if (pm_iir
& GEN6_PM_RP_UP_THRESHOLD
) {
1117 else /* CHV needs even encode values */
1118 adj
= IS_CHERRYVIEW(dev_priv
) ? 2 : 1;
1120 * For better performance, jump directly
1121 * to RPe if we're below it.
1123 if (new_delay
< dev_priv
->rps
.efficient_freq
- adj
) {
1124 new_delay
= dev_priv
->rps
.efficient_freq
;
1127 } else if (any_waiters(dev_priv
)) {
1129 } else if (pm_iir
& GEN6_PM_RP_DOWN_TIMEOUT
) {
1130 if (dev_priv
->rps
.cur_freq
> dev_priv
->rps
.efficient_freq
)
1131 new_delay
= dev_priv
->rps
.efficient_freq
;
1133 new_delay
= dev_priv
->rps
.min_freq_softlimit
;
1135 } else if (pm_iir
& GEN6_PM_RP_DOWN_THRESHOLD
) {
1138 else /* CHV needs even encode values */
1139 adj
= IS_CHERRYVIEW(dev_priv
) ? -2 : -1;
1140 } else { /* unknown event */
1144 dev_priv
->rps
.last_adj
= adj
;
1146 /* sysfs frequency interfaces may have snuck in while servicing the
1150 new_delay
= clamp_t(int, new_delay
, min
, max
);
1152 intel_set_rps(dev_priv
->dev
, new_delay
);
1154 mutex_unlock(&dev_priv
->rps
.hw_lock
);
1159 * ivybridge_parity_work - Workqueue called when a parity error interrupt
1161 * @work: workqueue struct
1163 * Doesn't actually do anything except notify userspace. As a consequence of
1164 * this event, userspace should try to remap the bad rows since statistically
1165 * it is likely the same row is more likely to go bad again.
1167 static void ivybridge_parity_work(struct work_struct
*work
)
1169 struct drm_i915_private
*dev_priv
=
1170 container_of(work
, struct drm_i915_private
, l3_parity
.error_work
);
1171 u32 error_status
, row
, bank
, subbank
;
1172 char *parity_event
[6];
1176 /* We must turn off DOP level clock gating to access the L3 registers.
1177 * In order to prevent a get/put style interface, acquire struct mutex
1178 * any time we access those registers.
1180 mutex_lock(&dev_priv
->dev
->struct_mutex
);
1182 /* If we've screwed up tracking, just let the interrupt fire again */
1183 if (WARN_ON(!dev_priv
->l3_parity
.which_slice
))
1186 misccpctl
= I915_READ(GEN7_MISCCPCTL
);
1187 I915_WRITE(GEN7_MISCCPCTL
, misccpctl
& ~GEN7_DOP_CLOCK_GATE_ENABLE
);
1188 POSTING_READ(GEN7_MISCCPCTL
);
1190 while ((slice
= ffs(dev_priv
->l3_parity
.which_slice
)) != 0) {
1194 if (WARN_ON_ONCE(slice
>= NUM_L3_SLICES(dev_priv
->dev
)))
1197 dev_priv
->l3_parity
.which_slice
&= ~(1<<slice
);
1199 reg
= GEN7_L3CDERRST1
+ (slice
* 0x200);
1201 error_status
= I915_READ(reg
);
1202 row
= GEN7_PARITY_ERROR_ROW(error_status
);
1203 bank
= GEN7_PARITY_ERROR_BANK(error_status
);
1204 subbank
= GEN7_PARITY_ERROR_SUBBANK(error_status
);
1206 I915_WRITE(reg
, GEN7_PARITY_ERROR_VALID
| GEN7_L3CDERRST1_ENABLE
);
1209 parity_event
[0] = I915_L3_PARITY_UEVENT
"=1";
1210 parity_event
[1] = kasprintf(GFP_KERNEL
, "ROW=%d", row
);
1211 parity_event
[2] = kasprintf(GFP_KERNEL
, "BANK=%d", bank
);
1212 parity_event
[3] = kasprintf(GFP_KERNEL
, "SUBBANK=%d", subbank
);
1213 parity_event
[4] = kasprintf(GFP_KERNEL
, "SLICE=%d", slice
);
1214 parity_event
[5] = NULL
;
1216 kobject_uevent_env(&dev_priv
->dev
->primary
->kdev
->kobj
,
1217 KOBJ_CHANGE
, parity_event
);
1219 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
1220 slice
, row
, bank
, subbank
);
1222 kfree(parity_event
[4]);
1223 kfree(parity_event
[3]);
1224 kfree(parity_event
[2]);
1225 kfree(parity_event
[1]);
1228 I915_WRITE(GEN7_MISCCPCTL
, misccpctl
);
1231 WARN_ON(dev_priv
->l3_parity
.which_slice
);
1232 spin_lock_irq(&dev_priv
->irq_lock
);
1233 gen5_enable_gt_irq(dev_priv
, GT_PARITY_ERROR(dev_priv
->dev
));
1234 spin_unlock_irq(&dev_priv
->irq_lock
);
1236 mutex_unlock(&dev_priv
->dev
->struct_mutex
);
1239 static void ivybridge_parity_error_irq_handler(struct drm_device
*dev
, u32 iir
)
1241 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1243 if (!HAS_L3_DPF(dev
))
1246 spin_lock(&dev_priv
->irq_lock
);
1247 gen5_disable_gt_irq(dev_priv
, GT_PARITY_ERROR(dev
));
1248 spin_unlock(&dev_priv
->irq_lock
);
1250 iir
&= GT_PARITY_ERROR(dev
);
1251 if (iir
& GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1
)
1252 dev_priv
->l3_parity
.which_slice
|= 1 << 1;
1254 if (iir
& GT_RENDER_L3_PARITY_ERROR_INTERRUPT
)
1255 dev_priv
->l3_parity
.which_slice
|= 1 << 0;
1257 queue_work(dev_priv
->wq
, &dev_priv
->l3_parity
.error_work
);
1260 static void ilk_gt_irq_handler(struct drm_device
*dev
,
1261 struct drm_i915_private
*dev_priv
,
1265 (GT_RENDER_USER_INTERRUPT
| GT_RENDER_PIPECTL_NOTIFY_INTERRUPT
))
1266 notify_ring(&dev_priv
->ring
[RCS
]);
1267 if (gt_iir
& ILK_BSD_USER_INTERRUPT
)
1268 notify_ring(&dev_priv
->ring
[VCS
]);
1271 static void snb_gt_irq_handler(struct drm_device
*dev
,
1272 struct drm_i915_private
*dev_priv
,
1277 (GT_RENDER_USER_INTERRUPT
| GT_RENDER_PIPECTL_NOTIFY_INTERRUPT
))
1278 notify_ring(&dev_priv
->ring
[RCS
]);
1279 if (gt_iir
& GT_BSD_USER_INTERRUPT
)
1280 notify_ring(&dev_priv
->ring
[VCS
]);
1281 if (gt_iir
& GT_BLT_USER_INTERRUPT
)
1282 notify_ring(&dev_priv
->ring
[BCS
]);
1284 if (gt_iir
& (GT_BLT_CS_ERROR_INTERRUPT
|
1285 GT_BSD_CS_ERROR_INTERRUPT
|
1286 GT_RENDER_CS_MASTER_ERROR_INTERRUPT
))
1287 DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir
);
1289 if (gt_iir
& GT_PARITY_ERROR(dev
))
1290 ivybridge_parity_error_irq_handler(dev
, gt_iir
);
1293 static irqreturn_t
gen8_gt_irq_handler(struct drm_i915_private
*dev_priv
,
1296 irqreturn_t ret
= IRQ_NONE
;
1298 if (master_ctl
& (GEN8_GT_RCS_IRQ
| GEN8_GT_BCS_IRQ
)) {
1299 u32 tmp
= I915_READ_FW(GEN8_GT_IIR(0));
1301 I915_WRITE_FW(GEN8_GT_IIR(0), tmp
);
1304 if (tmp
& (GT_CONTEXT_SWITCH_INTERRUPT
<< GEN8_RCS_IRQ_SHIFT
))
1305 intel_lrc_irq_handler(&dev_priv
->ring
[RCS
]);
1306 if (tmp
& (GT_RENDER_USER_INTERRUPT
<< GEN8_RCS_IRQ_SHIFT
))
1307 notify_ring(&dev_priv
->ring
[RCS
]);
1309 if (tmp
& (GT_CONTEXT_SWITCH_INTERRUPT
<< GEN8_BCS_IRQ_SHIFT
))
1310 intel_lrc_irq_handler(&dev_priv
->ring
[BCS
]);
1311 if (tmp
& (GT_RENDER_USER_INTERRUPT
<< GEN8_BCS_IRQ_SHIFT
))
1312 notify_ring(&dev_priv
->ring
[BCS
]);
1314 DRM_ERROR("The master control interrupt lied (GT0)!\n");
1317 if (master_ctl
& (GEN8_GT_VCS1_IRQ
| GEN8_GT_VCS2_IRQ
)) {
1318 u32 tmp
= I915_READ_FW(GEN8_GT_IIR(1));
1320 I915_WRITE_FW(GEN8_GT_IIR(1), tmp
);
1323 if (tmp
& (GT_CONTEXT_SWITCH_INTERRUPT
<< GEN8_VCS1_IRQ_SHIFT
))
1324 intel_lrc_irq_handler(&dev_priv
->ring
[VCS
]);
1325 if (tmp
& (GT_RENDER_USER_INTERRUPT
<< GEN8_VCS1_IRQ_SHIFT
))
1326 notify_ring(&dev_priv
->ring
[VCS
]);
1328 if (tmp
& (GT_CONTEXT_SWITCH_INTERRUPT
<< GEN8_VCS2_IRQ_SHIFT
))
1329 intel_lrc_irq_handler(&dev_priv
->ring
[VCS2
]);
1330 if (tmp
& (GT_RENDER_USER_INTERRUPT
<< GEN8_VCS2_IRQ_SHIFT
))
1331 notify_ring(&dev_priv
->ring
[VCS2
]);
1333 DRM_ERROR("The master control interrupt lied (GT1)!\n");
1336 if (master_ctl
& GEN8_GT_VECS_IRQ
) {
1337 u32 tmp
= I915_READ_FW(GEN8_GT_IIR(3));
1339 I915_WRITE_FW(GEN8_GT_IIR(3), tmp
);
1342 if (tmp
& (GT_CONTEXT_SWITCH_INTERRUPT
<< GEN8_VECS_IRQ_SHIFT
))
1343 intel_lrc_irq_handler(&dev_priv
->ring
[VECS
]);
1344 if (tmp
& (GT_RENDER_USER_INTERRUPT
<< GEN8_VECS_IRQ_SHIFT
))
1345 notify_ring(&dev_priv
->ring
[VECS
]);
1347 DRM_ERROR("The master control interrupt lied (GT3)!\n");
1350 if (master_ctl
& GEN8_GT_PM_IRQ
) {
1351 u32 tmp
= I915_READ_FW(GEN8_GT_IIR(2));
1352 if (tmp
& dev_priv
->pm_rps_events
) {
1353 I915_WRITE_FW(GEN8_GT_IIR(2),
1354 tmp
& dev_priv
->pm_rps_events
);
1356 gen6_rps_irq_handler(dev_priv
, tmp
);
1358 DRM_ERROR("The master control interrupt lied (PM)!\n");
1364 static bool bxt_port_hotplug_long_detect(enum port port
, u32 val
)
1368 return val
& PORTA_HOTPLUG_LONG_DETECT
;
1370 return val
& PORTB_HOTPLUG_LONG_DETECT
;
1372 return val
& PORTC_HOTPLUG_LONG_DETECT
;
1378 static bool spt_port_hotplug2_long_detect(enum port port
, u32 val
)
1382 return val
& PORTE_HOTPLUG_LONG_DETECT
;
1388 static bool spt_port_hotplug_long_detect(enum port port
, u32 val
)
1392 return val
& PORTA_HOTPLUG_LONG_DETECT
;
1394 return val
& PORTB_HOTPLUG_LONG_DETECT
;
1396 return val
& PORTC_HOTPLUG_LONG_DETECT
;
1398 return val
& PORTD_HOTPLUG_LONG_DETECT
;
1404 static bool ilk_port_hotplug_long_detect(enum port port
, u32 val
)
1408 return val
& DIGITAL_PORTA_HOTPLUG_LONG_DETECT
;
1414 static bool pch_port_hotplug_long_detect(enum port port
, u32 val
)
1418 return val
& PORTB_HOTPLUG_LONG_DETECT
;
1420 return val
& PORTC_HOTPLUG_LONG_DETECT
;
1422 return val
& PORTD_HOTPLUG_LONG_DETECT
;
1428 static bool i9xx_port_hotplug_long_detect(enum port port
, u32 val
)
1432 return val
& PORTB_HOTPLUG_INT_LONG_PULSE
;
1434 return val
& PORTC_HOTPLUG_INT_LONG_PULSE
;
1436 return val
& PORTD_HOTPLUG_INT_LONG_PULSE
;
/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
1449 static void intel_get_hpd_pins(u32
*pin_mask
, u32
*long_mask
,
1450 u32 hotplug_trigger
, u32 dig_hotplug_reg
,
1451 const u32 hpd
[HPD_NUM_PINS
],
1452 bool long_pulse_detect(enum port port
, u32 val
))
1457 for_each_hpd_pin(i
) {
1458 if ((hpd
[i
] & hotplug_trigger
) == 0)
1461 *pin_mask
|= BIT(i
);
1463 if (!intel_hpd_pin_to_port(i
, &port
))
1466 if (long_pulse_detect(port
, dig_hotplug_reg
))
1467 *long_mask
|= BIT(i
);
1470 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
1471 hotplug_trigger
, dig_hotplug_reg
, *pin_mask
);
1475 static void gmbus_irq_handler(struct drm_device
*dev
)
1477 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1479 wake_up_all(&dev_priv
->gmbus_wait_queue
);
1482 static void dp_aux_irq_handler(struct drm_device
*dev
)
1484 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1486 wake_up_all(&dev_priv
->gmbus_wait_queue
);
1489 #if defined(CONFIG_DEBUG_FS)
1490 static void display_pipe_crc_irq_handler(struct drm_device
*dev
, enum pipe pipe
,
1491 uint32_t crc0
, uint32_t crc1
,
1492 uint32_t crc2
, uint32_t crc3
,
1495 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1496 struct intel_pipe_crc
*pipe_crc
= &dev_priv
->pipe_crc
[pipe
];
1497 struct intel_pipe_crc_entry
*entry
;
1500 spin_lock(&pipe_crc
->lock
);
1502 if (!pipe_crc
->entries
) {
1503 spin_unlock(&pipe_crc
->lock
);
1504 DRM_DEBUG_KMS("spurious interrupt\n");
1508 head
= pipe_crc
->head
;
1509 tail
= pipe_crc
->tail
;
1511 if (CIRC_SPACE(head
, tail
, INTEL_PIPE_CRC_ENTRIES_NR
) < 1) {
1512 spin_unlock(&pipe_crc
->lock
);
1513 DRM_ERROR("CRC buffer overflowing\n");
1517 entry
= &pipe_crc
->entries
[head
];
1519 entry
->frame
= dev
->driver
->get_vblank_counter(dev
, pipe
);
1520 entry
->crc
[0] = crc0
;
1521 entry
->crc
[1] = crc1
;
1522 entry
->crc
[2] = crc2
;
1523 entry
->crc
[3] = crc3
;
1524 entry
->crc
[4] = crc4
;
1526 head
= (head
+ 1) & (INTEL_PIPE_CRC_ENTRIES_NR
- 1);
1527 pipe_crc
->head
= head
;
1529 spin_unlock(&pipe_crc
->lock
);
1531 wake_up_interruptible(&pipe_crc
->wq
);
1535 display_pipe_crc_irq_handler(struct drm_device
*dev
, enum pipe pipe
,
1536 uint32_t crc0
, uint32_t crc1
,
1537 uint32_t crc2
, uint32_t crc3
,
1542 static void hsw_pipe_crc_irq_handler(struct drm_device
*dev
, enum pipe pipe
)
1544 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1546 display_pipe_crc_irq_handler(dev
, pipe
,
1547 I915_READ(PIPE_CRC_RES_1_IVB(pipe
)),
1551 static void ivb_pipe_crc_irq_handler(struct drm_device
*dev
, enum pipe pipe
)
1553 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1555 display_pipe_crc_irq_handler(dev
, pipe
,
1556 I915_READ(PIPE_CRC_RES_1_IVB(pipe
)),
1557 I915_READ(PIPE_CRC_RES_2_IVB(pipe
)),
1558 I915_READ(PIPE_CRC_RES_3_IVB(pipe
)),
1559 I915_READ(PIPE_CRC_RES_4_IVB(pipe
)),
1560 I915_READ(PIPE_CRC_RES_5_IVB(pipe
)));
1563 static void i9xx_pipe_crc_irq_handler(struct drm_device
*dev
, enum pipe pipe
)
1565 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1566 uint32_t res1
, res2
;
1568 if (INTEL_INFO(dev
)->gen
>= 3)
1569 res1
= I915_READ(PIPE_CRC_RES_RES1_I915(pipe
));
1573 if (INTEL_INFO(dev
)->gen
>= 5 || IS_G4X(dev
))
1574 res2
= I915_READ(PIPE_CRC_RES_RES2_G4X(pipe
));
1578 display_pipe_crc_irq_handler(dev
, pipe
,
1579 I915_READ(PIPE_CRC_RES_RED(pipe
)),
1580 I915_READ(PIPE_CRC_RES_GREEN(pipe
)),
1581 I915_READ(PIPE_CRC_RES_BLUE(pipe
)),
/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
1588 static void gen6_rps_irq_handler(struct drm_i915_private
*dev_priv
, u32 pm_iir
)
1590 if (pm_iir
& dev_priv
->pm_rps_events
) {
1591 spin_lock(&dev_priv
->irq_lock
);
1592 gen6_disable_pm_irq(dev_priv
, pm_iir
& dev_priv
->pm_rps_events
);
1593 if (dev_priv
->rps
.interrupts_enabled
) {
1594 dev_priv
->rps
.pm_iir
|= pm_iir
& dev_priv
->pm_rps_events
;
1595 queue_work(dev_priv
->wq
, &dev_priv
->rps
.work
);
1597 spin_unlock(&dev_priv
->irq_lock
);
1600 if (INTEL_INFO(dev_priv
)->gen
>= 8)
1603 if (HAS_VEBOX(dev_priv
->dev
)) {
1604 if (pm_iir
& PM_VEBOX_USER_INTERRUPT
)
1605 notify_ring(&dev_priv
->ring
[VECS
]);
1607 if (pm_iir
& PM_VEBOX_CS_ERROR_INTERRUPT
)
1608 DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir
);
1612 static bool intel_pipe_handle_vblank(struct drm_device
*dev
, enum pipe pipe
)
1614 if (!drm_handle_vblank(dev
, pipe
))
1620 static void valleyview_pipestat_irq_handler(struct drm_device
*dev
, u32 iir
)
1622 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1623 u32 pipe_stats
[I915_MAX_PIPES
] = { };
1626 spin_lock(&dev_priv
->irq_lock
);
1627 for_each_pipe(dev_priv
, pipe
) {
1629 u32 mask
, iir_bit
= 0;
1632 * PIPESTAT bits get signalled even when the interrupt is
1633 * disabled with the mask bits, and some of the status bits do
1634 * not generate interrupts at all (like the underrun bit). Hence
1635 * we need to be careful that we only handle what we want to
		/* fifo underruns are filtered in the underrun handler. */
1640 mask
= PIPE_FIFO_UNDERRUN_STATUS
;
1644 iir_bit
= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT
;
1647 iir_bit
= I915_DISPLAY_PIPE_B_EVENT_INTERRUPT
;
1650 iir_bit
= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT
;
1654 mask
|= dev_priv
->pipestat_irq_mask
[pipe
];
1659 reg
= PIPESTAT(pipe
);
1660 mask
|= PIPESTAT_INT_ENABLE_MASK
;
1661 pipe_stats
[pipe
] = I915_READ(reg
) & mask
;
1664 * Clear the PIPE*STAT regs before the IIR
1666 if (pipe_stats
[pipe
] & (PIPE_FIFO_UNDERRUN_STATUS
|
1667 PIPESTAT_INT_STATUS_MASK
))
1668 I915_WRITE(reg
, pipe_stats
[pipe
]);
1670 spin_unlock(&dev_priv
->irq_lock
);
1672 for_each_pipe(dev_priv
, pipe
) {
1673 if (pipe_stats
[pipe
] & PIPE_START_VBLANK_INTERRUPT_STATUS
&&
1674 intel_pipe_handle_vblank(dev
, pipe
))
1675 intel_check_page_flip(dev
, pipe
);
1677 if (pipe_stats
[pipe
] & PLANE_FLIP_DONE_INT_STATUS_VLV
) {
1678 intel_prepare_page_flip(dev
, pipe
);
1679 intel_finish_page_flip(dev
, pipe
);
1682 if (pipe_stats
[pipe
] & PIPE_CRC_DONE_INTERRUPT_STATUS
)
1683 i9xx_pipe_crc_irq_handler(dev
, pipe
);
1685 if (pipe_stats
[pipe
] & PIPE_FIFO_UNDERRUN_STATUS
)
1686 intel_cpu_fifo_underrun_irq_handler(dev_priv
, pipe
);
1689 if (pipe_stats
[0] & PIPE_GMBUS_INTERRUPT_STATUS
)
1690 gmbus_irq_handler(dev
);
1693 static void i9xx_hpd_irq_handler(struct drm_device
*dev
)
1695 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1696 u32 hotplug_status
= I915_READ(PORT_HOTPLUG_STAT
);
1697 u32 pin_mask
= 0, long_mask
= 0;
1699 if (!hotplug_status
)
1702 I915_WRITE(PORT_HOTPLUG_STAT
, hotplug_status
);
1704 * Make sure hotplug status is cleared before we clear IIR, or else we
1705 * may miss hotplug events.
1707 POSTING_READ(PORT_HOTPLUG_STAT
);
1709 if (IS_G4X(dev
) || IS_VALLEYVIEW(dev
)) {
1710 u32 hotplug_trigger
= hotplug_status
& HOTPLUG_INT_STATUS_G4X
;
1712 if (hotplug_trigger
) {
1713 intel_get_hpd_pins(&pin_mask
, &long_mask
, hotplug_trigger
,
1714 hotplug_trigger
, hpd_status_g4x
,
1715 i9xx_port_hotplug_long_detect
);
1717 intel_hpd_irq_handler(dev
, pin_mask
, long_mask
);
1720 if (hotplug_status
& DP_AUX_CHANNEL_MASK_INT_STATUS_G4X
)
1721 dp_aux_irq_handler(dev
);
1723 u32 hotplug_trigger
= hotplug_status
& HOTPLUG_INT_STATUS_I915
;
1725 if (hotplug_trigger
) {
1726 intel_get_hpd_pins(&pin_mask
, &long_mask
, hotplug_trigger
,
1727 hotplug_trigger
, hpd_status_i915
,
1728 i9xx_port_hotplug_long_detect
);
1729 intel_hpd_irq_handler(dev
, pin_mask
, long_mask
);
1734 static irqreturn_t
valleyview_irq_handler(int irq
, void *arg
)
1736 struct drm_device
*dev
= arg
;
1737 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1738 u32 iir
, gt_iir
, pm_iir
;
1739 irqreturn_t ret
= IRQ_NONE
;
1741 if (!intel_irqs_enabled(dev_priv
))
1745 /* Find, clear, then process each source of interrupt */
1747 gt_iir
= I915_READ(GTIIR
);
1749 I915_WRITE(GTIIR
, gt_iir
);
1751 pm_iir
= I915_READ(GEN6_PMIIR
);
1753 I915_WRITE(GEN6_PMIIR
, pm_iir
);
1755 iir
= I915_READ(VLV_IIR
);
1757 /* Consume port before clearing IIR or we'll miss events */
1758 if (iir
& I915_DISPLAY_PORT_INTERRUPT
)
1759 i9xx_hpd_irq_handler(dev
);
1760 I915_WRITE(VLV_IIR
, iir
);
1763 if (gt_iir
== 0 && pm_iir
== 0 && iir
== 0)
1769 snb_gt_irq_handler(dev
, dev_priv
, gt_iir
);
1771 gen6_rps_irq_handler(dev_priv
, pm_iir
);
1772 /* Call regardless, as some status bits might not be
1773 * signalled in iir */
1774 valleyview_pipestat_irq_handler(dev
, iir
);
1781 static irqreturn_t
cherryview_irq_handler(int irq
, void *arg
)
1783 struct drm_device
*dev
= arg
;
1784 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1785 u32 master_ctl
, iir
;
1786 irqreturn_t ret
= IRQ_NONE
;
1788 if (!intel_irqs_enabled(dev_priv
))
1792 master_ctl
= I915_READ(GEN8_MASTER_IRQ
) & ~GEN8_MASTER_IRQ_CONTROL
;
1793 iir
= I915_READ(VLV_IIR
);
1795 if (master_ctl
== 0 && iir
== 0)
1800 I915_WRITE(GEN8_MASTER_IRQ
, 0);
1802 /* Find, clear, then process each source of interrupt */
1805 /* Consume port before clearing IIR or we'll miss events */
1806 if (iir
& I915_DISPLAY_PORT_INTERRUPT
)
1807 i9xx_hpd_irq_handler(dev
);
1808 I915_WRITE(VLV_IIR
, iir
);
1811 gen8_gt_irq_handler(dev_priv
, master_ctl
);
1813 /* Call regardless, as some status bits might not be
1814 * signalled in iir */
1815 valleyview_pipestat_irq_handler(dev
, iir
);
1817 I915_WRITE(GEN8_MASTER_IRQ
, DE_MASTER_IRQ_CONTROL
);
1818 POSTING_READ(GEN8_MASTER_IRQ
);
1824 static void ibx_hpd_irq_handler(struct drm_device
*dev
, u32 hotplug_trigger
,
1825 const u32 hpd
[HPD_NUM_PINS
])
1827 struct drm_i915_private
*dev_priv
= to_i915(dev
);
1828 u32 dig_hotplug_reg
, pin_mask
= 0, long_mask
= 0;
1830 dig_hotplug_reg
= I915_READ(PCH_PORT_HOTPLUG
);
1831 I915_WRITE(PCH_PORT_HOTPLUG
, dig_hotplug_reg
);
1833 intel_get_hpd_pins(&pin_mask
, &long_mask
, hotplug_trigger
,
1834 dig_hotplug_reg
, hpd
,
1835 pch_port_hotplug_long_detect
);
1837 intel_hpd_irq_handler(dev
, pin_mask
, long_mask
);
1840 static void ibx_irq_handler(struct drm_device
*dev
, u32 pch_iir
)
1842 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1844 u32 hotplug_trigger
= pch_iir
& SDE_HOTPLUG_MASK
;
1846 if (hotplug_trigger
)
1847 ibx_hpd_irq_handler(dev
, hotplug_trigger
, hpd_ibx
);
1849 if (pch_iir
& SDE_AUDIO_POWER_MASK
) {
1850 int port
= ffs((pch_iir
& SDE_AUDIO_POWER_MASK
) >>
1851 SDE_AUDIO_POWER_SHIFT
);
1852 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
1856 if (pch_iir
& SDE_AUX_MASK
)
1857 dp_aux_irq_handler(dev
);
1859 if (pch_iir
& SDE_GMBUS
)
1860 gmbus_irq_handler(dev
);
1862 if (pch_iir
& SDE_AUDIO_HDCP_MASK
)
1863 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1865 if (pch_iir
& SDE_AUDIO_TRANS_MASK
)
1866 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1868 if (pch_iir
& SDE_POISON
)
1869 DRM_ERROR("PCH poison interrupt\n");
1871 if (pch_iir
& SDE_FDI_MASK
)
1872 for_each_pipe(dev_priv
, pipe
)
1873 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
1875 I915_READ(FDI_RX_IIR(pipe
)));
1877 if (pch_iir
& (SDE_TRANSB_CRC_DONE
| SDE_TRANSA_CRC_DONE
))
1878 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1880 if (pch_iir
& (SDE_TRANSB_CRC_ERR
| SDE_TRANSA_CRC_ERR
))
1881 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1883 if (pch_iir
& SDE_TRANSA_FIFO_UNDER
)
1884 intel_pch_fifo_underrun_irq_handler(dev_priv
, TRANSCODER_A
);
1886 if (pch_iir
& SDE_TRANSB_FIFO_UNDER
)
1887 intel_pch_fifo_underrun_irq_handler(dev_priv
, TRANSCODER_B
);
1890 static void ivb_err_int_handler(struct drm_device
*dev
)
1892 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1893 u32 err_int
= I915_READ(GEN7_ERR_INT
);
1896 if (err_int
& ERR_INT_POISON
)
1897 DRM_ERROR("Poison interrupt\n");
1899 for_each_pipe(dev_priv
, pipe
) {
1900 if (err_int
& ERR_INT_FIFO_UNDERRUN(pipe
))
1901 intel_cpu_fifo_underrun_irq_handler(dev_priv
, pipe
);
1903 if (err_int
& ERR_INT_PIPE_CRC_DONE(pipe
)) {
1904 if (IS_IVYBRIDGE(dev
))
1905 ivb_pipe_crc_irq_handler(dev
, pipe
);
1907 hsw_pipe_crc_irq_handler(dev
, pipe
);
1911 I915_WRITE(GEN7_ERR_INT
, err_int
);
1914 static void cpt_serr_int_handler(struct drm_device
*dev
)
1916 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1917 u32 serr_int
= I915_READ(SERR_INT
);
1919 if (serr_int
& SERR_INT_POISON
)
1920 DRM_ERROR("PCH poison interrupt\n");
1922 if (serr_int
& SERR_INT_TRANS_A_FIFO_UNDERRUN
)
1923 intel_pch_fifo_underrun_irq_handler(dev_priv
, TRANSCODER_A
);
1925 if (serr_int
& SERR_INT_TRANS_B_FIFO_UNDERRUN
)
1926 intel_pch_fifo_underrun_irq_handler(dev_priv
, TRANSCODER_B
);
1928 if (serr_int
& SERR_INT_TRANS_C_FIFO_UNDERRUN
)
1929 intel_pch_fifo_underrun_irq_handler(dev_priv
, TRANSCODER_C
);
1931 I915_WRITE(SERR_INT
, serr_int
);
1934 static void cpt_irq_handler(struct drm_device
*dev
, u32 pch_iir
)
1936 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1938 u32 hotplug_trigger
= pch_iir
& SDE_HOTPLUG_MASK_CPT
;
1940 if (hotplug_trigger
)
1941 ibx_hpd_irq_handler(dev
, hotplug_trigger
, hpd_cpt
);
1943 if (pch_iir
& SDE_AUDIO_POWER_MASK_CPT
) {
1944 int port
= ffs((pch_iir
& SDE_AUDIO_POWER_MASK_CPT
) >>
1945 SDE_AUDIO_POWER_SHIFT_CPT
);
1946 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1950 if (pch_iir
& SDE_AUX_MASK_CPT
)
1951 dp_aux_irq_handler(dev
);
1953 if (pch_iir
& SDE_GMBUS_CPT
)
1954 gmbus_irq_handler(dev
);
1956 if (pch_iir
& SDE_AUDIO_CP_REQ_CPT
)
1957 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
1959 if (pch_iir
& SDE_AUDIO_CP_CHG_CPT
)
1960 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
1962 if (pch_iir
& SDE_FDI_MASK_CPT
)
1963 for_each_pipe(dev_priv
, pipe
)
1964 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
1966 I915_READ(FDI_RX_IIR(pipe
)));
1968 if (pch_iir
& SDE_ERROR_CPT
)
1969 cpt_serr_int_handler(dev
);
1972 static void spt_irq_handler(struct drm_device
*dev
, u32 pch_iir
)
1974 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1975 u32 hotplug_trigger
= pch_iir
& SDE_HOTPLUG_MASK_SPT
&
1976 ~SDE_PORTE_HOTPLUG_SPT
;
1977 u32 hotplug2_trigger
= pch_iir
& SDE_PORTE_HOTPLUG_SPT
;
1978 u32 pin_mask
= 0, long_mask
= 0;
1980 if (hotplug_trigger
) {
1981 u32 dig_hotplug_reg
;
1983 dig_hotplug_reg
= I915_READ(PCH_PORT_HOTPLUG
);
1984 I915_WRITE(PCH_PORT_HOTPLUG
, dig_hotplug_reg
);
1986 intel_get_hpd_pins(&pin_mask
, &long_mask
, hotplug_trigger
,
1987 dig_hotplug_reg
, hpd_spt
,
1988 spt_port_hotplug_long_detect
);
1991 if (hotplug2_trigger
) {
1992 u32 dig_hotplug_reg
;
1994 dig_hotplug_reg
= I915_READ(PCH_PORT_HOTPLUG2
);
1995 I915_WRITE(PCH_PORT_HOTPLUG2
, dig_hotplug_reg
);
1997 intel_get_hpd_pins(&pin_mask
, &long_mask
, hotplug2_trigger
,
1998 dig_hotplug_reg
, hpd_spt
,
1999 spt_port_hotplug2_long_detect
);
2003 intel_hpd_irq_handler(dev
, pin_mask
, long_mask
);
2005 if (pch_iir
& SDE_GMBUS_CPT
)
2006 gmbus_irq_handler(dev
);
2009 static void ilk_hpd_irq_handler(struct drm_device
*dev
, u32 hotplug_trigger
,
2010 const u32 hpd
[HPD_NUM_PINS
])
2012 struct drm_i915_private
*dev_priv
= to_i915(dev
);
2013 u32 dig_hotplug_reg
, pin_mask
= 0, long_mask
= 0;
2015 dig_hotplug_reg
= I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL
);
2016 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL
, dig_hotplug_reg
);
2018 intel_get_hpd_pins(&pin_mask
, &long_mask
, hotplug_trigger
,
2019 dig_hotplug_reg
, hpd
,
2020 ilk_port_hotplug_long_detect
);
2022 intel_hpd_irq_handler(dev
, pin_mask
, long_mask
);
static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk);

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe) &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_rps_change_irq_handler(dev);
}
static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ivb);

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev);

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}
/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* We get interrupts on unclaimed registers, so check for this before we
	 * do any I915_{READ,WRITE}. */
	intel_uncore_check_errors(dev);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(dev)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	/* Find, clear, then process each source of interrupt */

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
		if (INTEL_INFO(dev)->gen >= 6)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		else
			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
		if (INTEL_INFO(dev)->gen >= 7)
			ivb_display_irq_handler(dev, de_iir);
		else
			ilk_display_irq_handler(dev, de_iir);
	}

	if (INTEL_INFO(dev)->gen >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(dev_priv, pm_iir);
		}
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

	return ret;
}
static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   bxt_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev, pin_mask, long_mask);
}
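
/*
 * The gen8+ top-level handler below mirrors the ironlake flow above, but every
 * source (GT, DE misc, DE port, per-pipe DE, PCH) is gated by its own bit in
 * GEN8_MASTER_IRQ, so the master control register is cleared up front and
 * only re-armed after all asserted IIRs have been processed.
 */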
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl;
	irqreturn_t ret = IRQ_NONE;
	uint32_t tmp = 0;
	enum pipe pipe;
	u32 aux_mask = GEN8_AUX_CHANNEL_A;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	if (INTEL_INFO(dev_priv)->gen >= 9)
		aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
			GEN9_AUX_CHANNEL_D;

	master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
	if (!master_ctl)
		return IRQ_NONE;

	I915_WRITE_FW(GEN8_MASTER_IRQ, 0);

	/* Find, clear, then process each source of interrupt */

	ret = gen8_gt_irq_handler(dev_priv, master_ctl);

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		tmp = I915_READ(GEN8_DE_MISC_IIR);
		if (tmp) {
			I915_WRITE(GEN8_DE_MISC_IIR, tmp);
			ret = IRQ_HANDLED;
			if (tmp & GEN8_DE_MISC_GSE)
				intel_opregion_asle_intr(dev);
			else
				DRM_ERROR("Unexpected DE Misc interrupt\n");
		} else
			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		tmp = I915_READ(GEN8_DE_PORT_IIR);
		if (tmp) {
			bool found = false;
			u32 hotplug_trigger = 0;

			if (IS_BROXTON(dev_priv))
				hotplug_trigger = tmp & BXT_DE_PORT_HOTPLUG_MASK;
			else if (IS_BROADWELL(dev_priv))
				hotplug_trigger = tmp & GEN8_PORT_DP_A_HOTPLUG;

			I915_WRITE(GEN8_DE_PORT_IIR, tmp);
			ret = IRQ_HANDLED;

			if (tmp & aux_mask) {
				dp_aux_irq_handler(dev);
				found = true;
			}

			if (hotplug_trigger) {
				if (IS_BROXTON(dev))
					bxt_hpd_irq_handler(dev, hotplug_trigger, hpd_bxt);
				else
					ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_bdw);
				found = true;
			}

			if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_GMBUS)) {
				gmbus_irq_handler(dev);
				found = true;
			}

			if (!found)
				DRM_ERROR("Unexpected DE Port interrupt\n");
		} else
			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
	}

	for_each_pipe(dev_priv, pipe) {
		uint32_t pipe_iir, flip_done = 0, fault_errors = 0;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (pipe_iir) {
			ret = IRQ_HANDLED;
			I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);

			if (pipe_iir & GEN8_PIPE_VBLANK &&
			    intel_pipe_handle_vblank(dev, pipe))
				intel_check_page_flip(dev, pipe);

			if (INTEL_INFO(dev_priv)->gen >= 9)
				flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
			else
				flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;

			if (flip_done) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip_plane(dev, pipe);
			}

			if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
				hsw_pipe_crc_irq_handler(dev, pipe);

			if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);

			if (INTEL_INFO(dev_priv)->gen >= 9)
				fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
			else
				fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;

			if (fault_errors)
				DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
					  pipe_name(pipe),
					  pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
		} else
			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
	}

	if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
	    master_ctl & GEN8_DE_PCH_IRQ) {
		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		u32 pch_iir = I915_READ(SDEIIR);
		if (pch_iir) {
			I915_WRITE(SDEIIR, pch_iir);
			ret = IRQ_HANDLED;

			if (HAS_PCH_SPT(dev_priv))
				spt_irq_handler(dev, pch_iir);
			else
				cpt_irq_handler(dev, pch_iir);
		} else
			DRM_ERROR("The master control interrupt lied (SDE)!\n");
	}

	I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ_FW(GEN8_MASTER_IRQ);

	return ret;
}
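
/*
 * GPU error/reset path: i915_error_wake_up() kicks every waiter so they
 * re-check the reset state (and drop any locks the reset work may need); it
 * is called both when a reset is declared and again once it has completed.
 */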
static void i915_error_wake_up(struct drm_i915_private *dev_priv,
			       bool reset_completed)
{
	struct intel_engine_cs *ring;
	int i;

	/*
	 * Notify all waiters for GPU completion events that reset state has
	 * been changed, and that they need to restart their wait after
	 * checking for potential errors (and bail out to drop locks if there is
	 * a gpu reset pending so that i915_error_work_func can acquire them).
	 */

	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
	for_each_ring(ring, dev_priv, i)
		wake_up_all(&ring->irq_queue);

	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
	wake_up_all(&dev_priv->pending_flip_queue);

	/*
	 * Signal tasks blocked in i915_gem_wait_for_error that the pending
	 * reset state is cleared.
	 */
	if (reset_completed)
		wake_up_all(&dev_priv->gpu_error.reset_queue);
}
/**
 * i915_reset_and_wakeup - do process context error handling work
 * @dev: drm device
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_reset_and_wakeup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_gpu_error *error = &dev_priv->gpu_error;
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
	int ret;

	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);

	/*
	 * Note that there's only one work item which does gpu resets, so we
	 * need not worry about concurrent gpu resets potentially incrementing
	 * error->reset_counter twice. We only need to take care of another
	 * racing irq/hangcheck declaring the gpu dead for a second time. A
	 * quick check for that is good enough: schedule_work ensures the
	 * correct ordering between hang detection and this work item, and since
	 * the reset in-progress bit is only ever set by code outside of this
	 * work we don't need to worry about any other races.
	 */
	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
				   reset_event);

		/*
		 * In most cases it's guaranteed that we get here with an RPM
		 * reference held, for example because there is a pending GPU
		 * request that won't finish until the reset is done. This
		 * isn't the case at least when we get here by doing a
		 * simulated reset via debugfs, so get an RPM reference.
		 */
		intel_runtime_pm_get(dev_priv);

		intel_prepare_reset(dev);

		/*
		 * All state reset _must_ be completed before we update the
		 * reset counter, for otherwise waiters might miss the reset
		 * pending state and not properly drop locks, resulting in
		 * deadlocks with the reset work.
		 */
		ret = i915_reset(dev);

		intel_finish_reset(dev);

		intel_runtime_pm_put(dev_priv);

		if (ret == 0) {
			/*
			 * After all the gem state is reset, increment the reset
			 * counter and wake up everyone waiting for the reset to
			 * complete.
			 *
			 * Since unlock operations are a one-sided barrier only,
			 * we need to insert a barrier here to order any seqno
			 * updates before the counter increment.
			 */
			smp_mb__before_atomic();
			atomic_inc(&dev_priv->gpu_error.reset_counter);

			kobject_uevent_env(&dev->primary->kdev->kobj,
					   KOBJ_CHANGE, reset_done_event);
		} else {
			atomic_or(I915_WEDGED, &error->reset_counter);
		}

		/*
		 * Note: The wake_up also serves as a memory barrier so that
		 * waiters see the updated value of the reset counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, true);
	}
}
static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev, instdone);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(dev_priv, pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}
/**
 * i915_handle_error - handle a gpu error
 * @dev: drm device
 *
 * Do some basic checking of register state at error time and
 * dump it to the syslog. Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs. Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged,
		       const char *fmt, ...)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	va_list args;
	char error_msg[80];

	va_start(args, fmt);
	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
	va_end(args);

	i915_capture_error_state(dev, wedged, error_msg);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		atomic_or(I915_RESET_IN_PROGRESS_FLAG,
			  &dev_priv->gpu_error.reset_counter);

		/*
		 * Wakeup waiting processes so that the reset function
		 * i915_reset_and_wakeup doesn't deadlock trying to grab
		 * various locks. By bumping the reset counter first, the woken
		 * processes will see a reset in progress and back off,
		 * releasing their locks and then wait for the reset completion.
		 * We must do this for _all_ gpu waiters that might hold locks
		 * that the reset work needs to acquire.
		 *
		 * Note: The wake_up serves as the required memory barrier to
		 * ensure that the waiters see the updated value of the reset
		 * counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, false);
	}

	i915_reset_and_wakeup(dev);
}
2619 /* Called from drm generic code, passed 'crtc' which
2620 * we use as a pipe index
2622 static int i915_enable_vblank(struct drm_device
*dev
, unsigned int pipe
)
2624 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2625 unsigned long irqflags
;
2627 spin_lock_irqsave(&dev_priv
->irq_lock
, irqflags
);
2628 if (INTEL_INFO(dev
)->gen
>= 4)
2629 i915_enable_pipestat(dev_priv
, pipe
,
2630 PIPE_START_VBLANK_INTERRUPT_STATUS
);
2632 i915_enable_pipestat(dev_priv
, pipe
,
2633 PIPE_VBLANK_INTERRUPT_STATUS
);
2634 spin_unlock_irqrestore(&dev_priv
->irq_lock
, irqflags
);
2639 static int ironlake_enable_vblank(struct drm_device
*dev
, unsigned int pipe
)
2641 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2642 unsigned long irqflags
;
2643 uint32_t bit
= (INTEL_INFO(dev
)->gen
>= 7) ? DE_PIPE_VBLANK_IVB(pipe
) :
2644 DE_PIPE_VBLANK(pipe
);
2646 spin_lock_irqsave(&dev_priv
->irq_lock
, irqflags
);
2647 ironlake_enable_display_irq(dev_priv
, bit
);
2648 spin_unlock_irqrestore(&dev_priv
->irq_lock
, irqflags
);
2653 static int valleyview_enable_vblank(struct drm_device
*dev
, unsigned int pipe
)
2655 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2656 unsigned long irqflags
;
2658 spin_lock_irqsave(&dev_priv
->irq_lock
, irqflags
);
2659 i915_enable_pipestat(dev_priv
, pipe
,
2660 PIPE_START_VBLANK_INTERRUPT_STATUS
);
2661 spin_unlock_irqrestore(&dev_priv
->irq_lock
, irqflags
);
2666 static int gen8_enable_vblank(struct drm_device
*dev
, unsigned int pipe
)
2668 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2669 unsigned long irqflags
;
2671 spin_lock_irqsave(&dev_priv
->irq_lock
, irqflags
);
2672 dev_priv
->de_irq_mask
[pipe
] &= ~GEN8_PIPE_VBLANK
;
2673 I915_WRITE(GEN8_DE_PIPE_IMR(pipe
), dev_priv
->de_irq_mask
[pipe
]);
2674 POSTING_READ(GEN8_DE_PIPE_IMR(pipe
));
2675 spin_unlock_irqrestore(&dev_priv
->irq_lock
, irqflags
);
2679 /* Called from drm generic code, passed 'crtc' which
2680 * we use as a pipe index
2682 static void i915_disable_vblank(struct drm_device
*dev
, unsigned int pipe
)
2684 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2685 unsigned long irqflags
;
2687 spin_lock_irqsave(&dev_priv
->irq_lock
, irqflags
);
2688 i915_disable_pipestat(dev_priv
, pipe
,
2689 PIPE_VBLANK_INTERRUPT_STATUS
|
2690 PIPE_START_VBLANK_INTERRUPT_STATUS
);
2691 spin_unlock_irqrestore(&dev_priv
->irq_lock
, irqflags
);
2694 static void ironlake_disable_vblank(struct drm_device
*dev
, unsigned int pipe
)
2696 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2697 unsigned long irqflags
;
2698 uint32_t bit
= (INTEL_INFO(dev
)->gen
>= 7) ? DE_PIPE_VBLANK_IVB(pipe
) :
2699 DE_PIPE_VBLANK(pipe
);
2701 spin_lock_irqsave(&dev_priv
->irq_lock
, irqflags
);
2702 ironlake_disable_display_irq(dev_priv
, bit
);
2703 spin_unlock_irqrestore(&dev_priv
->irq_lock
, irqflags
);
2706 static void valleyview_disable_vblank(struct drm_device
*dev
, unsigned int pipe
)
2708 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2709 unsigned long irqflags
;
2711 spin_lock_irqsave(&dev_priv
->irq_lock
, irqflags
);
2712 i915_disable_pipestat(dev_priv
, pipe
,
2713 PIPE_START_VBLANK_INTERRUPT_STATUS
);
2714 spin_unlock_irqrestore(&dev_priv
->irq_lock
, irqflags
);
2717 static void gen8_disable_vblank(struct drm_device
*dev
, unsigned int pipe
)
2719 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2720 unsigned long irqflags
;
2722 spin_lock_irqsave(&dev_priv
->irq_lock
, irqflags
);
2723 dev_priv
->de_irq_mask
[pipe
] |= GEN8_PIPE_VBLANK
;
2724 I915_WRITE(GEN8_DE_PIPE_IMR(pipe
), dev_priv
->de_irq_mask
[pipe
]);
2725 POSTING_READ(GEN8_DE_PIPE_IMR(pipe
));
2726 spin_unlock_irqrestore(&dev_priv
->irq_lock
, irqflags
);
static bool
ring_idle(struct intel_engine_cs *ring, u32 seqno)
{
	return (list_empty(&ring->request_list) ||
		i915_seqno_passed(seqno, ring->last_submitted_seqno));
}

static bool
ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
{
	if (INTEL_INFO(dev)->gen >= 8) {
		return (ipehr >> 23) == 0x1c;
	} else {
		ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
		return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
				 MI_SEMAPHORE_REGISTER);
	}
}
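
/*
 * Given the IPEHR of a stuck MI_SEMAPHORE wait, work out which engine was
 * supposed to signal it: on gen8+ the wait carries a GGTT offset that is
 * matched against each ring's signal_ggtt slot, on earlier gens the sync
 * bits encoded in the instruction select the mbox.wait slot instead.
 */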
static struct intel_engine_cs *
semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_engine_cs *signaller;
	int i;

	if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
		for_each_ring(signaller, dev_priv, i) {
			if (ring == signaller)
				continue;

			if (offset == signaller->semaphore.signal_ggtt[ring->id])
				return signaller;
		}
	} else {
		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;

		for_each_ring(signaller, dev_priv, i) {
			if (ring == signaller)
				continue;

			if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
				return signaller;
		}
	}

	DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
		  ring->id, ipehr, offset);

	return NULL;
}
static struct intel_engine_cs *
semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 cmd, ipehr, head;
	u64 offset = 0;
	int i, backwards;

	/*
	 * This function does not support execlist mode - any attempt to
	 * proceed further into this function will result in a kernel panic
	 * when dereferencing ring->buffer, which is not set up in execlist
	 * mode.
	 *
	 * The correct way of doing it would be to derive the currently
	 * executing ring buffer from the current context, which is derived
	 * from the currently running request. Unfortunately, to get the
	 * current request we would have to grab the struct_mutex before doing
	 * anything else, which would be ill-advised since some other thread
	 * might have grabbed it already and managed to hang itself, causing
	 * the hang checker to deadlock.
	 *
	 * Therefore, this function does not support execlist mode in its
	 * current form. Just return NULL and move on.
	 */
	if (ring->buffer == NULL)
		return NULL;

	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
	if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
		return NULL;

	/*
	 * HEAD is likely pointing to the dword after the actual command,
	 * so scan backwards until we find the MBOX. But limit it to just 3
	 * or 4 dwords depending on the semaphore wait command size.
	 * Note that we don't care about ACTHD here since that might
	 * point at a batch, and semaphores are always emitted into the
	 * ringbuffer itself.
	 */
	head = I915_READ_HEAD(ring) & HEAD_ADDR;
	backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;

	for (i = backwards; i; --i) {
		/*
		 * Be paranoid and presume the hw has gone off into the wild -
		 * our ring is smaller than what the hardware (and hence
		 * HEAD_ADDR) allows. Also handles wrap-around.
		 */
		head &= ring->buffer->size - 1;

		/* This here seems to blow up */
		cmd = ioread32(ring->buffer->virtual_start + head);
		if (cmd == ipehr)
			break;

		head -= 4;
	}

	if (!i)
		return NULL;

	*seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
	if (INTEL_INFO(ring->dev)->gen >= 8) {
		offset = ioread32(ring->buffer->virtual_start + head + 12);
		offset <<= 32;
		offset |= ioread32(ring->buffer->virtual_start + head + 8);
	}
	return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
}
static int semaphore_passed(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_engine_cs *signaller;
	u32 seqno;

	ring->hangcheck.deadlock++;

	signaller = semaphore_waits_for(ring, &seqno);
	if (signaller == NULL)
		return -1;

	/* Prevent pathological recursion due to driver bugs */
	if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
		return -1;

	if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
		return 1;

	/* cursory check for an unkickable deadlock */
	if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
	    semaphore_passed(signaller) < 0)
		return -1;

	return 0;
}

static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		ring->hangcheck.deadlock = 0;
}
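
/*
 * ring_stuck() classifies a ring whose seqno has not advanced: ACTIVE /
 * ACTIVE_LOOP while ACTHD is still moving, WAIT when it is legitimately
 * blocked on a semaphore, KICK when poking the RB_WAIT bit may unblock it,
 * and HUNG when nothing else applies.
 */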
static enum intel_ring_hangcheck_action
ring_stuck(struct intel_engine_cs *ring, u64 acthd)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	if (acthd != ring->hangcheck.acthd) {
		if (acthd > ring->hangcheck.max_acthd) {
			ring->hangcheck.max_acthd = acthd;
			return HANGCHECK_ACTIVE;
		}

		return HANGCHECK_ACTIVE_LOOP;
	}

	if (IS_GEN2(dev))
		return HANGCHECK_HUNG;

	/* Is the chip hanging on a WAIT_FOR_EVENT?
	 * If so we can simply poke the RB_WAIT bit
	 * and break the hang. This should work on
	 * all but the second generation chipsets.
	 */
	tmp = I915_READ_CTL(ring);
	if (tmp & RING_WAIT) {
		i915_handle_error(dev, false,
				  "Kicking stuck wait on %s",
				  ring->name);
		I915_WRITE_CTL(ring, tmp);
		return HANGCHECK_KICK;
	}

	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
		switch (semaphore_passed(ring)) {
		default:
			return HANGCHECK_HUNG;
		case 1:
			i915_handle_error(dev, false,
					  "Kicking stuck semaphore on %s",
					  ring->name);
			I915_WRITE_CTL(ring, tmp);
			return HANGCHECK_KICK;
		case 0:
			return HANGCHECK_WAIT;
		}
	}

	return HANGCHECK_HUNG;
}
2940 * This is called when the chip hasn't reported back with completed
2941 * batchbuffers in a long time. We keep track per ring seqno progress and
2942 * if there are no progress, hangcheck score for that ring is increased.
2943 * Further, acthd is inspected to see if the ring is stuck. On stuck case
2944 * we kick the ring. If we see no progress on three subsequent calls
2945 * we assume chip is wedged and try to fix it by resetting the chip.
2947 static void i915_hangcheck_elapsed(struct work_struct
*work
)
2949 struct drm_i915_private
*dev_priv
=
2950 container_of(work
, typeof(*dev_priv
),
2951 gpu_error
.hangcheck_work
.work
);
2952 struct drm_device
*dev
= dev_priv
->dev
;
2953 struct intel_engine_cs
*ring
;
2955 int busy_count
= 0, rings_hung
= 0;
2956 bool stuck
[I915_NUM_RINGS
] = { 0 };
2961 if (!i915
.enable_hangcheck
)
2964 for_each_ring(ring
, dev_priv
, i
) {
2969 semaphore_clear_deadlocks(dev_priv
);
2971 seqno
= ring
->get_seqno(ring
, false);
2972 acthd
= intel_ring_get_active_head(ring
);
2974 if (ring
->hangcheck
.seqno
== seqno
) {
2975 if (ring_idle(ring
, seqno
)) {
2976 ring
->hangcheck
.action
= HANGCHECK_IDLE
;
2978 if (waitqueue_active(&ring
->irq_queue
)) {
2979 /* Issue a wake-up to catch stuck h/w. */
2980 if (!test_and_set_bit(ring
->id
, &dev_priv
->gpu_error
.missed_irq_rings
)) {
2981 if (!(dev_priv
->gpu_error
.test_irq_rings
& intel_ring_flag(ring
)))
2982 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2985 DRM_INFO("Fake missed irq on %s\n",
2987 wake_up_all(&ring
->irq_queue
);
2989 /* Safeguard against driver failure */
2990 ring
->hangcheck
.score
+= BUSY
;
2994 /* We always increment the hangcheck score
2995 * if the ring is busy and still processing
2996 * the same request, so that no single request
2997 * can run indefinitely (such as a chain of
2998 * batches). The only time we do not increment
2999 * the hangcheck score on this ring, if this
3000 * ring is in a legitimate wait for another
3001 * ring. In that case the waiting ring is a
3002 * victim and we want to be sure we catch the
3003 * right culprit. Then every time we do kick
3004 * the ring, add a small increment to the
3005 * score so that we can catch a batch that is
3006 * being repeatedly kicked and so responsible
3007 * for stalling the machine.
3009 ring
->hangcheck
.action
= ring_stuck(ring
,
3012 switch (ring
->hangcheck
.action
) {
3013 case HANGCHECK_IDLE
:
3014 case HANGCHECK_WAIT
:
3015 case HANGCHECK_ACTIVE
:
3017 case HANGCHECK_ACTIVE_LOOP
:
3018 ring
->hangcheck
.score
+= BUSY
;
3020 case HANGCHECK_KICK
:
3021 ring
->hangcheck
.score
+= KICK
;
3023 case HANGCHECK_HUNG
:
3024 ring
->hangcheck
.score
+= HUNG
;
3030 ring
->hangcheck
.action
= HANGCHECK_ACTIVE
;
3032 /* Gradually reduce the count so that we catch DoS
3033 * attempts across multiple batches.
3035 if (ring
->hangcheck
.score
> 0)
3036 ring
->hangcheck
.score
--;
3038 ring
->hangcheck
.acthd
= ring
->hangcheck
.max_acthd
= 0;
3041 ring
->hangcheck
.seqno
= seqno
;
3042 ring
->hangcheck
.acthd
= acthd
;
3046 for_each_ring(ring
, dev_priv
, i
) {
3047 if (ring
->hangcheck
.score
>= HANGCHECK_SCORE_RING_HUNG
) {
3048 DRM_INFO("%s on %s\n",
3049 stuck
[i
] ? "stuck" : "no progress",
3056 return i915_handle_error(dev
, true, "Ring hung");
3059 /* Reset timer case chip hangs without another request
3061 i915_queue_hangcheck(dev
);
3064 void i915_queue_hangcheck(struct drm_device
*dev
)
3066 struct i915_gpu_error
*e
= &to_i915(dev
)->gpu_error
;
3068 if (!i915
.enable_hangcheck
)
3071 /* Don't continually defer the hangcheck so that it is always run at
3072 * least once after work has been scheduled on any ring. Otherwise,
3073 * we will ignore a hung ring if a second ring is kept busy.
3076 queue_delayed_work(e
->hangcheck_wq
, &e
->hangcheck_work
,
3077 round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES
));
3080 static void ibx_irq_reset(struct drm_device
*dev
)
3082 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3084 if (HAS_PCH_NOP(dev
))
3087 GEN5_IRQ_RESET(SDE
);
3089 if (HAS_PCH_CPT(dev
) || HAS_PCH_LPT(dev
))
3090 I915_WRITE(SERR_INT
, 0xffffffff);
3094 * SDEIER is also touched by the interrupt handler to work around missed PCH
3095 * interrupts. Hence we can't update it after the interrupt handler is enabled -
3096 * instead we unconditionally enable all PCH interrupt sources here, but then
3097 * only unmask them as needed with SDEIMR.
3099 * This function needs to be called before interrupts are enabled.
3101 static void ibx_irq_pre_postinstall(struct drm_device
*dev
)
3103 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3105 if (HAS_PCH_NOP(dev
))
3108 WARN_ON(I915_READ(SDEIER
) != 0);
3109 I915_WRITE(SDEIER
, 0xffffffff);
3110 POSTING_READ(SDEIER
);
3113 static void gen5_gt_irq_reset(struct drm_device
*dev
)
3115 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3118 if (INTEL_INFO(dev
)->gen
>= 6)
3119 GEN5_IRQ_RESET(GEN6_PM
);
3124 static void ironlake_irq_reset(struct drm_device
*dev
)
3126 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3128 I915_WRITE(HWSTAM
, 0xffffffff);
3132 I915_WRITE(GEN7_ERR_INT
, 0xffffffff);
3134 gen5_gt_irq_reset(dev
);
3139 static void vlv_display_irq_reset(struct drm_i915_private
*dev_priv
)
3143 i915_hotplug_interrupt_update(dev_priv
, 0xFFFFFFFF, 0);
3144 I915_WRITE(PORT_HOTPLUG_STAT
, I915_READ(PORT_HOTPLUG_STAT
));
3146 for_each_pipe(dev_priv
, pipe
)
3147 I915_WRITE(PIPESTAT(pipe
), 0xffff);
3149 GEN5_IRQ_RESET(VLV_
);
3152 static void valleyview_irq_preinstall(struct drm_device
*dev
)
3154 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3157 I915_WRITE(VLV_IMR
, 0);
3158 I915_WRITE(RING_IMR(RENDER_RING_BASE
), 0);
3159 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE
), 0);
3160 I915_WRITE(RING_IMR(BLT_RING_BASE
), 0);
3162 gen5_gt_irq_reset(dev
);
3164 I915_WRITE(DPINVGTT
, DPINVGTT_STATUS_MASK
);
3166 vlv_display_irq_reset(dev_priv
);
3169 static void gen8_gt_irq_reset(struct drm_i915_private
*dev_priv
)
3171 GEN8_IRQ_RESET_NDX(GT
, 0);
3172 GEN8_IRQ_RESET_NDX(GT
, 1);
3173 GEN8_IRQ_RESET_NDX(GT
, 2);
3174 GEN8_IRQ_RESET_NDX(GT
, 3);
3177 static void gen8_irq_reset(struct drm_device
*dev
)
3179 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3182 I915_WRITE(GEN8_MASTER_IRQ
, 0);
3183 POSTING_READ(GEN8_MASTER_IRQ
);
3185 gen8_gt_irq_reset(dev_priv
);
3187 for_each_pipe(dev_priv
, pipe
)
3188 if (intel_display_power_is_enabled(dev_priv
,
3189 POWER_DOMAIN_PIPE(pipe
)))
3190 GEN8_IRQ_RESET_NDX(DE_PIPE
, pipe
);
3192 GEN5_IRQ_RESET(GEN8_DE_PORT_
);
3193 GEN5_IRQ_RESET(GEN8_DE_MISC_
);
3194 GEN5_IRQ_RESET(GEN8_PCU_
);
3196 if (HAS_PCH_SPLIT(dev
))
3200 void gen8_irq_power_well_post_enable(struct drm_i915_private
*dev_priv
,
3201 unsigned int pipe_mask
)
3203 uint32_t extra_ier
= GEN8_PIPE_VBLANK
| GEN8_PIPE_FIFO_UNDERRUN
;
3205 spin_lock_irq(&dev_priv
->irq_lock
);
3206 if (pipe_mask
& 1 << PIPE_A
)
3207 GEN8_IRQ_INIT_NDX(DE_PIPE
, PIPE_A
,
3208 dev_priv
->de_irq_mask
[PIPE_A
],
3209 ~dev_priv
->de_irq_mask
[PIPE_A
] | extra_ier
);
3210 if (pipe_mask
& 1 << PIPE_B
)
3211 GEN8_IRQ_INIT_NDX(DE_PIPE
, PIPE_B
,
3212 dev_priv
->de_irq_mask
[PIPE_B
],
3213 ~dev_priv
->de_irq_mask
[PIPE_B
] | extra_ier
);
3214 if (pipe_mask
& 1 << PIPE_C
)
3215 GEN8_IRQ_INIT_NDX(DE_PIPE
, PIPE_C
,
3216 dev_priv
->de_irq_mask
[PIPE_C
],
3217 ~dev_priv
->de_irq_mask
[PIPE_C
] | extra_ier
);
3218 spin_unlock_irq(&dev_priv
->irq_lock
);
3221 static void cherryview_irq_preinstall(struct drm_device
*dev
)
3223 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3225 I915_WRITE(GEN8_MASTER_IRQ
, 0);
3226 POSTING_READ(GEN8_MASTER_IRQ
);
3228 gen8_gt_irq_reset(dev_priv
);
3230 GEN5_IRQ_RESET(GEN8_PCU_
);
3232 I915_WRITE(DPINVGTT
, DPINVGTT_STATUS_MASK_CHV
);
3234 vlv_display_irq_reset(dev_priv
);
3237 static u32
intel_hpd_enabled_irqs(struct drm_device
*dev
,
3238 const u32 hpd
[HPD_NUM_PINS
])
3240 struct drm_i915_private
*dev_priv
= to_i915(dev
);
3241 struct intel_encoder
*encoder
;
3242 u32 enabled_irqs
= 0;
3244 for_each_intel_encoder(dev
, encoder
)
3245 if (dev_priv
->hotplug
.stats
[encoder
->hpd_pin
].state
== HPD_ENABLED
)
3246 enabled_irqs
|= hpd
[encoder
->hpd_pin
];
3248 return enabled_irqs
;
3251 static void ibx_hpd_irq_setup(struct drm_device
*dev
)
3253 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3254 u32 hotplug_irqs
, hotplug
, enabled_irqs
;
3256 if (HAS_PCH_IBX(dev
)) {
3257 hotplug_irqs
= SDE_HOTPLUG_MASK
;
3258 enabled_irqs
= intel_hpd_enabled_irqs(dev
, hpd_ibx
);
3260 hotplug_irqs
= SDE_HOTPLUG_MASK_CPT
;
3261 enabled_irqs
= intel_hpd_enabled_irqs(dev
, hpd_cpt
);
3264 ibx_display_interrupt_update(dev_priv
, hotplug_irqs
, enabled_irqs
);
3267 * Enable digital hotplug on the PCH, and configure the DP short pulse
3268 * duration to 2ms (which is the minimum in the Display Port spec).
3269 * The pulse duration bits are reserved on LPT+.
3271 hotplug
= I915_READ(PCH_PORT_HOTPLUG
);
3272 hotplug
&= ~(PORTD_PULSE_DURATION_MASK
|PORTC_PULSE_DURATION_MASK
|PORTB_PULSE_DURATION_MASK
);
3273 hotplug
|= PORTD_HOTPLUG_ENABLE
| PORTD_PULSE_DURATION_2ms
;
3274 hotplug
|= PORTC_HOTPLUG_ENABLE
| PORTC_PULSE_DURATION_2ms
;
3275 hotplug
|= PORTB_HOTPLUG_ENABLE
| PORTB_PULSE_DURATION_2ms
;
3277 * When CPU and PCH are on the same package, port A
3278 * HPD must be enabled in both north and south.
3280 if (HAS_PCH_LPT_LP(dev
))
3281 hotplug
|= PORTA_HOTPLUG_ENABLE
;
3282 I915_WRITE(PCH_PORT_HOTPLUG
, hotplug
);
3285 static void spt_hpd_irq_setup(struct drm_device
*dev
)
3287 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3288 u32 hotplug_irqs
, hotplug
, enabled_irqs
;
3290 hotplug_irqs
= SDE_HOTPLUG_MASK_SPT
;
3291 enabled_irqs
= intel_hpd_enabled_irqs(dev
, hpd_spt
);
3293 ibx_display_interrupt_update(dev_priv
, hotplug_irqs
, enabled_irqs
);
3295 /* Enable digital hotplug on the PCH */
3296 hotplug
= I915_READ(PCH_PORT_HOTPLUG
);
3297 hotplug
|= PORTD_HOTPLUG_ENABLE
| PORTC_HOTPLUG_ENABLE
|
3298 PORTB_HOTPLUG_ENABLE
| PORTA_HOTPLUG_ENABLE
;
3299 I915_WRITE(PCH_PORT_HOTPLUG
, hotplug
);
3301 hotplug
= I915_READ(PCH_PORT_HOTPLUG2
);
3302 hotplug
|= PORTE_HOTPLUG_ENABLE
;
3303 I915_WRITE(PCH_PORT_HOTPLUG2
, hotplug
);
3306 static void ilk_hpd_irq_setup(struct drm_device
*dev
)
3308 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3309 u32 hotplug_irqs
, hotplug
, enabled_irqs
;
3311 if (INTEL_INFO(dev
)->gen
>= 8) {
3312 hotplug_irqs
= GEN8_PORT_DP_A_HOTPLUG
;
3313 enabled_irqs
= intel_hpd_enabled_irqs(dev
, hpd_bdw
);
3315 bdw_update_port_irq(dev_priv
, hotplug_irqs
, enabled_irqs
);
3316 } else if (INTEL_INFO(dev
)->gen
>= 7) {
3317 hotplug_irqs
= DE_DP_A_HOTPLUG_IVB
;
3318 enabled_irqs
= intel_hpd_enabled_irqs(dev
, hpd_ivb
);
3320 ilk_update_display_irq(dev_priv
, hotplug_irqs
, enabled_irqs
);
3322 hotplug_irqs
= DE_DP_A_HOTPLUG
;
3323 enabled_irqs
= intel_hpd_enabled_irqs(dev
, hpd_ilk
);
3325 ilk_update_display_irq(dev_priv
, hotplug_irqs
, enabled_irqs
);
3329 * Enable digital hotplug on the CPU, and configure the DP short pulse
3330 * duration to 2ms (which is the minimum in the Display Port spec)
3331 * The pulse duration bits are reserved on HSW+.
3333 hotplug
= I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL
);
3334 hotplug
&= ~DIGITAL_PORTA_PULSE_DURATION_MASK
;
3335 hotplug
|= DIGITAL_PORTA_HOTPLUG_ENABLE
| DIGITAL_PORTA_PULSE_DURATION_2ms
;
3336 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL
, hotplug
);
3338 ibx_hpd_irq_setup(dev
);
3341 static void bxt_hpd_irq_setup(struct drm_device
*dev
)
3343 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3344 u32 hotplug_irqs
, hotplug
, enabled_irqs
;
3346 enabled_irqs
= intel_hpd_enabled_irqs(dev
, hpd_bxt
);
3347 hotplug_irqs
= BXT_DE_PORT_HOTPLUG_MASK
;
3349 bdw_update_port_irq(dev_priv
, hotplug_irqs
, enabled_irqs
);
3351 hotplug
= I915_READ(PCH_PORT_HOTPLUG
);
3352 hotplug
|= PORTC_HOTPLUG_ENABLE
| PORTB_HOTPLUG_ENABLE
|
3353 PORTA_HOTPLUG_ENABLE
;
3354 I915_WRITE(PCH_PORT_HOTPLUG
, hotplug
);
3357 static void ibx_irq_postinstall(struct drm_device
*dev
)
3359 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3362 if (HAS_PCH_NOP(dev
))
3365 if (HAS_PCH_IBX(dev
))
3366 mask
= SDE_GMBUS
| SDE_AUX_MASK
| SDE_POISON
;
3368 mask
= SDE_GMBUS_CPT
| SDE_AUX_MASK_CPT
;
3370 gen5_assert_iir_is_zero(dev_priv
, SDEIIR
);
3371 I915_WRITE(SDEIMR
, ~mask
);
3374 static void gen5_gt_irq_postinstall(struct drm_device
*dev
)
3376 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3377 u32 pm_irqs
, gt_irqs
;
3379 pm_irqs
= gt_irqs
= 0;
3381 dev_priv
->gt_irq_mask
= ~0;
3382 if (HAS_L3_DPF(dev
)) {
3383 /* L3 parity interrupt is always unmasked. */
3384 dev_priv
->gt_irq_mask
= ~GT_PARITY_ERROR(dev
);
3385 gt_irqs
|= GT_PARITY_ERROR(dev
);
3388 gt_irqs
|= GT_RENDER_USER_INTERRUPT
;
3390 gt_irqs
|= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT
|
3391 ILK_BSD_USER_INTERRUPT
;
3393 gt_irqs
|= GT_BLT_USER_INTERRUPT
| GT_BSD_USER_INTERRUPT
;
3396 GEN5_IRQ_INIT(GT
, dev_priv
->gt_irq_mask
, gt_irqs
);
3398 if (INTEL_INFO(dev
)->gen
>= 6) {
3400 * RPS interrupts will get enabled/disabled on demand when RPS
3401 * itself is enabled/disabled.
3404 pm_irqs
|= PM_VEBOX_USER_INTERRUPT
;
3406 dev_priv
->pm_irq_mask
= 0xffffffff;
3407 GEN5_IRQ_INIT(GEN6_PM
, dev_priv
->pm_irq_mask
, pm_irqs
);
3411 static int ironlake_irq_postinstall(struct drm_device
*dev
)
3413 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3414 u32 display_mask
, extra_mask
;
3416 if (INTEL_INFO(dev
)->gen
>= 7) {
3417 display_mask
= (DE_MASTER_IRQ_CONTROL
| DE_GSE_IVB
|
3418 DE_PCH_EVENT_IVB
| DE_PLANEC_FLIP_DONE_IVB
|
3419 DE_PLANEB_FLIP_DONE_IVB
|
3420 DE_PLANEA_FLIP_DONE_IVB
| DE_AUX_CHANNEL_A_IVB
);
3421 extra_mask
= (DE_PIPEC_VBLANK_IVB
| DE_PIPEB_VBLANK_IVB
|
3422 DE_PIPEA_VBLANK_IVB
| DE_ERR_INT_IVB
|
3423 DE_DP_A_HOTPLUG_IVB
);
3425 display_mask
= (DE_MASTER_IRQ_CONTROL
| DE_GSE
| DE_PCH_EVENT
|
3426 DE_PLANEA_FLIP_DONE
| DE_PLANEB_FLIP_DONE
|
3428 DE_PIPEB_CRC_DONE
| DE_PIPEA_CRC_DONE
|
3430 extra_mask
= (DE_PIPEA_VBLANK
| DE_PIPEB_VBLANK
| DE_PCU_EVENT
|
3431 DE_PIPEB_FIFO_UNDERRUN
| DE_PIPEA_FIFO_UNDERRUN
|
3435 dev_priv
->irq_mask
= ~display_mask
;
3437 I915_WRITE(HWSTAM
, 0xeffe);
3439 ibx_irq_pre_postinstall(dev
);
3441 GEN5_IRQ_INIT(DE
, dev_priv
->irq_mask
, display_mask
| extra_mask
);
3443 gen5_gt_irq_postinstall(dev
);
3445 ibx_irq_postinstall(dev
);
3447 if (IS_IRONLAKE_M(dev
)) {
3448 /* Enable PCU event interrupts
3450 * spinlocking not required here for correctness since interrupt
3451 * setup is guaranteed to run in single-threaded context. But we
3452 * need it to make the assert_spin_locked happy. */
3453 spin_lock_irq(&dev_priv
->irq_lock
);
3454 ironlake_enable_display_irq(dev_priv
, DE_PCU_EVENT
);
3455 spin_unlock_irq(&dev_priv
->irq_lock
);
3461 static void valleyview_display_irqs_install(struct drm_i915_private
*dev_priv
)
3467 pipestat_mask
= PIPESTAT_INT_STATUS_MASK
|
3468 PIPE_FIFO_UNDERRUN_STATUS
;
3470 for_each_pipe(dev_priv
, pipe
)
3471 I915_WRITE(PIPESTAT(pipe
), pipestat_mask
);
3472 POSTING_READ(PIPESTAT(PIPE_A
));
3474 pipestat_mask
= PLANE_FLIP_DONE_INT_STATUS_VLV
|
3475 PIPE_CRC_DONE_INTERRUPT_STATUS
;
3477 i915_enable_pipestat(dev_priv
, PIPE_A
, PIPE_GMBUS_INTERRUPT_STATUS
);
3478 for_each_pipe(dev_priv
, pipe
)
3479 i915_enable_pipestat(dev_priv
, pipe
, pipestat_mask
);
3481 iir_mask
= I915_DISPLAY_PORT_INTERRUPT
|
3482 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT
|
3483 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT
;
3484 if (IS_CHERRYVIEW(dev_priv
))
3485 iir_mask
|= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT
;
3486 dev_priv
->irq_mask
&= ~iir_mask
;
3488 I915_WRITE(VLV_IIR
, iir_mask
);
3489 I915_WRITE(VLV_IIR
, iir_mask
);
3490 I915_WRITE(VLV_IER
, ~dev_priv
->irq_mask
);
3491 I915_WRITE(VLV_IMR
, dev_priv
->irq_mask
);
3492 POSTING_READ(VLV_IMR
);
3495 static void valleyview_display_irqs_uninstall(struct drm_i915_private
*dev_priv
)
3501 iir_mask
= I915_DISPLAY_PORT_INTERRUPT
|
3502 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT
|
3503 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT
;
3504 if (IS_CHERRYVIEW(dev_priv
))
3505 iir_mask
|= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT
;
3507 dev_priv
->irq_mask
|= iir_mask
;
3508 I915_WRITE(VLV_IMR
, dev_priv
->irq_mask
);
3509 I915_WRITE(VLV_IER
, ~dev_priv
->irq_mask
);
3510 I915_WRITE(VLV_IIR
, iir_mask
);
3511 I915_WRITE(VLV_IIR
, iir_mask
);
3512 POSTING_READ(VLV_IIR
);
3514 pipestat_mask
= PLANE_FLIP_DONE_INT_STATUS_VLV
|
3515 PIPE_CRC_DONE_INTERRUPT_STATUS
;
3517 i915_disable_pipestat(dev_priv
, PIPE_A
, PIPE_GMBUS_INTERRUPT_STATUS
);
3518 for_each_pipe(dev_priv
, pipe
)
3519 i915_disable_pipestat(dev_priv
, pipe
, pipestat_mask
);
3521 pipestat_mask
= PIPESTAT_INT_STATUS_MASK
|
3522 PIPE_FIFO_UNDERRUN_STATUS
;
3524 for_each_pipe(dev_priv
, pipe
)
3525 I915_WRITE(PIPESTAT(pipe
), pipestat_mask
);
3526 POSTING_READ(PIPESTAT(PIPE_A
));
3529 void valleyview_enable_display_irqs(struct drm_i915_private
*dev_priv
)
3531 assert_spin_locked(&dev_priv
->irq_lock
);
3533 if (dev_priv
->display_irqs_enabled
)
3536 dev_priv
->display_irqs_enabled
= true;
3538 if (intel_irqs_enabled(dev_priv
))
3539 valleyview_display_irqs_install(dev_priv
);
3542 void valleyview_disable_display_irqs(struct drm_i915_private
*dev_priv
)
3544 assert_spin_locked(&dev_priv
->irq_lock
);
3546 if (!dev_priv
->display_irqs_enabled
)
3549 dev_priv
->display_irqs_enabled
= false;
3551 if (intel_irqs_enabled(dev_priv
))
3552 valleyview_display_irqs_uninstall(dev_priv
);
3555 static void vlv_display_irq_postinstall(struct drm_i915_private
*dev_priv
)
3557 dev_priv
->irq_mask
= ~0;
3559 i915_hotplug_interrupt_update(dev_priv
, 0xffffffff, 0);
3560 POSTING_READ(PORT_HOTPLUG_EN
);
3562 I915_WRITE(VLV_IIR
, 0xffffffff);
3563 I915_WRITE(VLV_IIR
, 0xffffffff);
3564 I915_WRITE(VLV_IER
, ~dev_priv
->irq_mask
);
3565 I915_WRITE(VLV_IMR
, dev_priv
->irq_mask
);
3566 POSTING_READ(VLV_IMR
);
3568 /* Interrupt setup is already guaranteed to be single-threaded, this is
3569 * just to make the assert_spin_locked check happy. */
3570 spin_lock_irq(&dev_priv
->irq_lock
);
3571 if (dev_priv
->display_irqs_enabled
)
3572 valleyview_display_irqs_install(dev_priv
);
3573 spin_unlock_irq(&dev_priv
->irq_lock
);
3576 static int valleyview_irq_postinstall(struct drm_device
*dev
)
3578 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3580 vlv_display_irq_postinstall(dev_priv
);
3582 gen5_gt_irq_postinstall(dev
);
3584 /* ack & enable invalid PTE error interrupts */
3585 #if 0 /* FIXME: add support to irq handler for checking these bits */
3586 I915_WRITE(DPINVGTT
, DPINVGTT_STATUS_MASK
);
3587 I915_WRITE(DPINVGTT
, DPINVGTT_EN_MASK
);
3590 I915_WRITE(VLV_MASTER_IER
, MASTER_INTERRUPT_ENABLE
);
3595 static void gen8_gt_irq_postinstall(struct drm_i915_private
*dev_priv
)
3597 /* These are interrupts we'll toggle with the ring mask register */
3598 uint32_t gt_interrupts
[] = {
3599 GT_RENDER_USER_INTERRUPT
<< GEN8_RCS_IRQ_SHIFT
|
3600 GT_CONTEXT_SWITCH_INTERRUPT
<< GEN8_RCS_IRQ_SHIFT
|
3601 GT_RENDER_L3_PARITY_ERROR_INTERRUPT
|
3602 GT_RENDER_USER_INTERRUPT
<< GEN8_BCS_IRQ_SHIFT
|
3603 GT_CONTEXT_SWITCH_INTERRUPT
<< GEN8_BCS_IRQ_SHIFT
,
3604 GT_RENDER_USER_INTERRUPT
<< GEN8_VCS1_IRQ_SHIFT
|
3605 GT_CONTEXT_SWITCH_INTERRUPT
<< GEN8_VCS1_IRQ_SHIFT
|
3606 GT_RENDER_USER_INTERRUPT
<< GEN8_VCS2_IRQ_SHIFT
|
3607 GT_CONTEXT_SWITCH_INTERRUPT
<< GEN8_VCS2_IRQ_SHIFT
,
3609 GT_RENDER_USER_INTERRUPT
<< GEN8_VECS_IRQ_SHIFT
|
3610 GT_CONTEXT_SWITCH_INTERRUPT
<< GEN8_VECS_IRQ_SHIFT
3613 dev_priv
->pm_irq_mask
= 0xffffffff;
3614 GEN8_IRQ_INIT_NDX(GT
, 0, ~gt_interrupts
[0], gt_interrupts
[0]);
3615 GEN8_IRQ_INIT_NDX(GT
, 1, ~gt_interrupts
[1], gt_interrupts
[1]);
3617 * RPS interrupts will get enabled/disabled on demand when RPS itself
3618 * is enabled/disabled.
3620 GEN8_IRQ_INIT_NDX(GT
, 2, dev_priv
->pm_irq_mask
, 0);
3621 GEN8_IRQ_INIT_NDX(GT
, 3, ~gt_interrupts
[3], gt_interrupts
[3]);
3624 static void gen8_de_irq_postinstall(struct drm_i915_private
*dev_priv
)
3626 uint32_t de_pipe_masked
= GEN8_PIPE_CDCLK_CRC_DONE
;
3627 uint32_t de_pipe_enables
;
3628 u32 de_port_masked
= GEN8_AUX_CHANNEL_A
;
3629 u32 de_port_enables
;
3632 if (INTEL_INFO(dev_priv
)->gen
>= 9) {
3633 de_pipe_masked
|= GEN9_PIPE_PLANE1_FLIP_DONE
|
3634 GEN9_DE_PIPE_IRQ_FAULT_ERRORS
;
3635 de_port_masked
|= GEN9_AUX_CHANNEL_B
| GEN9_AUX_CHANNEL_C
|
3637 if (IS_BROXTON(dev_priv
))
3638 de_port_masked
|= BXT_DE_PORT_GMBUS
;
3640 de_pipe_masked
|= GEN8_PIPE_PRIMARY_FLIP_DONE
|
3641 GEN8_DE_PIPE_IRQ_FAULT_ERRORS
;
3644 de_pipe_enables
= de_pipe_masked
| GEN8_PIPE_VBLANK
|
3645 GEN8_PIPE_FIFO_UNDERRUN
;
3647 de_port_enables
= de_port_masked
;
3648 if (IS_BROXTON(dev_priv
))
3649 de_port_enables
|= BXT_DE_PORT_HOTPLUG_MASK
;
3650 else if (IS_BROADWELL(dev_priv
))
3651 de_port_enables
|= GEN8_PORT_DP_A_HOTPLUG
;
3653 dev_priv
->de_irq_mask
[PIPE_A
] = ~de_pipe_masked
;
3654 dev_priv
->de_irq_mask
[PIPE_B
] = ~de_pipe_masked
;
3655 dev_priv
->de_irq_mask
[PIPE_C
] = ~de_pipe_masked
;
3657 for_each_pipe(dev_priv
, pipe
)
3658 if (intel_display_power_is_enabled(dev_priv
,
3659 POWER_DOMAIN_PIPE(pipe
)))
3660 GEN8_IRQ_INIT_NDX(DE_PIPE
, pipe
,
3661 dev_priv
->de_irq_mask
[pipe
],
3664 GEN5_IRQ_INIT(GEN8_DE_PORT_
, ~de_port_masked
, de_port_enables
);
3667 static int gen8_irq_postinstall(struct drm_device
*dev
)
3669 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3671 if (HAS_PCH_SPLIT(dev
))
3672 ibx_irq_pre_postinstall(dev
);
3674 gen8_gt_irq_postinstall(dev_priv
);
3675 gen8_de_irq_postinstall(dev_priv
);
3677 if (HAS_PCH_SPLIT(dev
))
3678 ibx_irq_postinstall(dev
);
3680 I915_WRITE(GEN8_MASTER_IRQ
, DE_MASTER_IRQ_CONTROL
);
3681 POSTING_READ(GEN8_MASTER_IRQ
);
3686 static int cherryview_irq_postinstall(struct drm_device
*dev
)
3688 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3690 vlv_display_irq_postinstall(dev_priv
);
3692 gen8_gt_irq_postinstall(dev_priv
);
3694 I915_WRITE(GEN8_MASTER_IRQ
, MASTER_INTERRUPT_ENABLE
);
3695 POSTING_READ(GEN8_MASTER_IRQ
);
3700 static void gen8_irq_uninstall(struct drm_device
*dev
)
3702 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3707 gen8_irq_reset(dev
);
3710 static void vlv_display_irq_uninstall(struct drm_i915_private
*dev_priv
)
3712 /* Interrupt setup is already guaranteed to be single-threaded, this is
3713 * just to make the assert_spin_locked check happy. */
3714 spin_lock_irq(&dev_priv
->irq_lock
);
3715 if (dev_priv
->display_irqs_enabled
)
3716 valleyview_display_irqs_uninstall(dev_priv
);
3717 spin_unlock_irq(&dev_priv
->irq_lock
);
3719 vlv_display_irq_reset(dev_priv
);
3721 dev_priv
->irq_mask
= ~0;
3724 static void valleyview_irq_uninstall(struct drm_device
*dev
)
3726 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3731 I915_WRITE(VLV_MASTER_IER
, 0);
3733 gen5_gt_irq_reset(dev
);
3735 I915_WRITE(HWSTAM
, 0xffffffff);
3737 vlv_display_irq_uninstall(dev_priv
);
3740 static void cherryview_irq_uninstall(struct drm_device
*dev
)
3742 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3747 I915_WRITE(GEN8_MASTER_IRQ
, 0);
3748 POSTING_READ(GEN8_MASTER_IRQ
);
3750 gen8_gt_irq_reset(dev_priv
);
3752 GEN5_IRQ_RESET(GEN8_PCU_
);
3754 vlv_display_irq_uninstall(dev_priv
);
3757 static void ironlake_irq_uninstall(struct drm_device
*dev
)
3759 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3764 ironlake_irq_reset(dev
);
3767 static void i8xx_irq_preinstall(struct drm_device
* dev
)
3769 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3772 for_each_pipe(dev_priv
, pipe
)
3773 I915_WRITE(PIPESTAT(pipe
), 0);
3774 I915_WRITE16(IMR
, 0xffff);
3775 I915_WRITE16(IER
, 0x0);
3776 POSTING_READ16(IER
);
3779 static int i8xx_irq_postinstall(struct drm_device
*dev
)
3781 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3784 ~(I915_ERROR_PAGE_TABLE
| I915_ERROR_MEMORY_REFRESH
));
3786 /* Unmask the interrupts that we always want on. */
3787 dev_priv
->irq_mask
=
3788 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT
|
3789 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT
|
3790 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT
|
3791 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT
);
3792 I915_WRITE16(IMR
, dev_priv
->irq_mask
);
3795 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT
|
3796 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT
|
3797 I915_USER_INTERRUPT
);
3798 POSTING_READ16(IER
);
3800 /* Interrupt setup is already guaranteed to be single-threaded, this is
3801 * just to make the assert_spin_locked check happy. */
3802 spin_lock_irq(&dev_priv
->irq_lock
);
3803 i915_enable_pipestat(dev_priv
, PIPE_A
, PIPE_CRC_DONE_INTERRUPT_STATUS
);
3804 i915_enable_pipestat(dev_priv
, PIPE_B
, PIPE_CRC_DONE_INTERRUPT_STATUS
);
3805 spin_unlock_irq(&dev_priv
->irq_lock
);
3811 * Returns true when a page flip has completed.
3813 static bool i8xx_handle_vblank(struct drm_device
*dev
,
3814 int plane
, int pipe
, u32 iir
)
3816 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3817 u16 flip_pending
= DISPLAY_PLANE_FLIP_PENDING(plane
);
3819 if (!intel_pipe_handle_vblank(dev
, pipe
))
3822 if ((iir
& flip_pending
) == 0)
3823 goto check_page_flip
;
3825 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3826 * to '0' on the following vblank, i.e. IIR has the Pendingflip
3827 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3828 * the flip is completed (no longer pending). Since this doesn't raise
3829 * an interrupt per se, we watch for the change at vblank.
3831 if (I915_READ16(ISR
) & flip_pending
)
3832 goto check_page_flip
;
3834 intel_prepare_page_flip(dev
, plane
);
3835 intel_finish_page_flip(dev
, pipe
);
3839 intel_check_page_flip(dev
, pipe
);
3843 static irqreturn_t
i8xx_irq_handler(int irq
, void *arg
)
3845 struct drm_device
*dev
= arg
;
3846 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3851 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT
|
3852 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT
;
3854 if (!intel_irqs_enabled(dev_priv
))
3857 iir
= I915_READ16(IIR
);
3861 while (iir
& ~flip_mask
) {
3862 /* Can't rely on pipestat interrupt bit in iir as it might
3863 * have been cleared after the pipestat interrupt was received.
3864 * It doesn't set the bit in iir again, but it still produces
3865 * interrupts (for non-MSI).
3867 spin_lock(&dev_priv
->irq_lock
);
3868 if (iir
& I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT
)
3869 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir
);
3871 for_each_pipe(dev_priv
, pipe
) {
3872 int reg
= PIPESTAT(pipe
);
3873 pipe_stats
[pipe
] = I915_READ(reg
);
3876 * Clear the PIPE*STAT regs before the IIR
3878 if (pipe_stats
[pipe
] & 0x8000ffff)
3879 I915_WRITE(reg
, pipe_stats
[pipe
]);
3881 spin_unlock(&dev_priv
->irq_lock
);
3883 I915_WRITE16(IIR
, iir
& ~flip_mask
);
3884 new_iir
= I915_READ16(IIR
); /* Flush posted writes */
3886 if (iir
& I915_USER_INTERRUPT
)
3887 notify_ring(&dev_priv
->ring
[RCS
]);
3889 for_each_pipe(dev_priv
, pipe
) {
3894 if (pipe_stats
[pipe
] & PIPE_VBLANK_INTERRUPT_STATUS
&&
3895 i8xx_handle_vblank(dev
, plane
, pipe
, iir
))
3896 flip_mask
&= ~DISPLAY_PLANE_FLIP_PENDING(plane
);
3898 if (pipe_stats
[pipe
] & PIPE_CRC_DONE_INTERRUPT_STATUS
)
3899 i9xx_pipe_crc_irq_handler(dev
, pipe
);
3901 if (pipe_stats
[pipe
] & PIPE_FIFO_UNDERRUN_STATUS
)
3902 intel_cpu_fifo_underrun_irq_handler(dev_priv
,
3912 static void i8xx_irq_uninstall(struct drm_device
* dev
)
3914 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3917 for_each_pipe(dev_priv
, pipe
) {
3918 /* Clear enable bits; then clear status bits */
3919 I915_WRITE(PIPESTAT(pipe
), 0);
3920 I915_WRITE(PIPESTAT(pipe
), I915_READ(PIPESTAT(pipe
)));
3922 I915_WRITE16(IMR
, 0xffff);
3923 I915_WRITE16(IER
, 0x0);
3924 I915_WRITE16(IIR
, I915_READ16(IIR
));
3927 static void i915_irq_preinstall(struct drm_device
* dev
)
3929 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3932 if (I915_HAS_HOTPLUG(dev
)) {
3933 i915_hotplug_interrupt_update(dev_priv
, 0xffffffff, 0);
3934 I915_WRITE(PORT_HOTPLUG_STAT
, I915_READ(PORT_HOTPLUG_STAT
));
3937 I915_WRITE16(HWSTAM
, 0xeffe);
3938 for_each_pipe(dev_priv
, pipe
)
3939 I915_WRITE(PIPESTAT(pipe
), 0);
3940 I915_WRITE(IMR
, 0xffffffff);
3941 I915_WRITE(IER
, 0x0);
3945 static int i915_irq_postinstall(struct drm_device
*dev
)
3947 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3950 I915_WRITE(EMR
, ~(I915_ERROR_PAGE_TABLE
| I915_ERROR_MEMORY_REFRESH
));
3952 /* Unmask the interrupts that we always want on. */
3953 dev_priv
->irq_mask
=
3954 ~(I915_ASLE_INTERRUPT
|
3955 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT
|
3956 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT
|
3957 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT
|
3958 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT
);
3961 I915_ASLE_INTERRUPT
|
3962 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT
|
3963 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT
|
3964 I915_USER_INTERRUPT
;
3966 if (I915_HAS_HOTPLUG(dev
)) {
3967 i915_hotplug_interrupt_update(dev_priv
, 0xffffffff, 0);
3968 POSTING_READ(PORT_HOTPLUG_EN
);
3970 /* Enable in IER... */
3971 enable_mask
|= I915_DISPLAY_PORT_INTERRUPT
;
3972 /* and unmask in IMR */
3973 dev_priv
->irq_mask
&= ~I915_DISPLAY_PORT_INTERRUPT
;
3976 I915_WRITE(IMR
, dev_priv
->irq_mask
);
3977 I915_WRITE(IER
, enable_mask
);
3980 i915_enable_asle_pipestat(dev
);
3982 /* Interrupt setup is already guaranteed to be single-threaded, this is
3983 * just to make the assert_spin_locked check happy. */
3984 spin_lock_irq(&dev_priv
->irq_lock
);
3985 i915_enable_pipestat(dev_priv
, PIPE_A
, PIPE_CRC_DONE_INTERRUPT_STATUS
);
3986 i915_enable_pipestat(dev_priv
, PIPE_B
, PIPE_CRC_DONE_INTERRUPT_STATUS
);
3987 spin_unlock_irq(&dev_priv
->irq_lock
);
3993 * Returns true when a page flip has completed.
3995 static bool i915_handle_vblank(struct drm_device
*dev
,
3996 int plane
, int pipe
, u32 iir
)
3998 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3999 u32 flip_pending
= DISPLAY_PLANE_FLIP_PENDING(plane
);
4001 if (!intel_pipe_handle_vblank(dev
, pipe
))
4004 if ((iir
& flip_pending
) == 0)
4005 goto check_page_flip
;
4007 /* We detect FlipDone by looking for the change in PendingFlip from '1'
4008 * to '0' on the following vblank, i.e. IIR has the Pendingflip
4009 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
4010 * the flip is completed (no longer pending). Since this doesn't raise
4011 * an interrupt per se, we watch for the change at vblank.
4013 if (I915_READ(ISR
) & flip_pending
)
4014 goto check_page_flip
;
4016 intel_prepare_page_flip(dev
, plane
);
4017 intel_finish_page_flip(dev
, pipe
);
4021 intel_check_page_flip(dev
, pipe
);
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		/* Consume port. Then clear IIR or we'll miss events */
		if (I915_HAS_HOTPLUG(dev) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	return ret;
}
static void i915_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}
static void i965_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev);

	return 0;
}
static void i915_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	hotplug_en = intel_hpd_enabled_irqs(dev, hpd_mask_i915);
	/* Programming the CRT detection parameters tends
	   to generate a spurious hotplug event about three
	   seconds later. So just do it once.
	*/
	if (IS_G4X(dev))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	i915_hotplug_interrupt_update_locked(dev_priv,
					     HOTPLUG_INT_EN_MASK |
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
					     hotplug_en);
}
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	iir = I915_READ(IIR);

	for (;;) {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[VCS]);

		for_each_pipe(dev_priv, pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	return ret;
}
static void i965_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}
/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not setup the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	intel_hpd_init_work(dev_priv);

	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
			  i915_hangcheck_elapsed);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	if (IS_GEN2(dev_priv)) {
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
	 */
	if (!IS_GEN2(dev_priv))
		dev->vblank_disable_immediate = true;

	dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		if (IS_BROXTON(dev))
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
		else if (HAS_PCH_SPT(dev))
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev_priv)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev_priv)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
		}
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}
/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->pm.irqs_enabled = true;

	return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
}
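
/*
 * Illustrative sketch, not part of the original file: one plausible way the
 * two-stage split described above is consumed by driver-load code. The
 * function name i915_driver_irq_example() and its error handling are
 * hypothetical; only intel_irq_init() and intel_irq_install() are real.
 */
static int __maybe_unused i915_driver_irq_example(struct drm_i915_private *dev_priv)
{
	int ret;

	/* Stage 1: set up vtables, work items and timers; no IRQ yet. */
	intel_irq_init(dev_priv);

	/* Stage 2: actually request the hardware interrupt. */
	ret = intel_irq_install(dev_priv);
	if (ret)
		return ret;

	/* Hotplug handling is enabled separately, once probing is done. */
	return 0;
}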
/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(dev_priv->dev);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->pm.irqs_enabled = false;
}
/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
	dev_priv->pm.irqs_enabled = false;
	synchronize_irq(dev_priv->dev->irq);
}
/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->pm.irqs_enabled = true;
	dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
	dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
}
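
/*
 * Illustrative sketch, not part of the original file: runtime-pm code is
 * expected to pair the two helpers above around the low-power state. The
 * example function names are hypothetical; only the intel_runtime_pm_*
 * helpers they call are real.
 */
static int __maybe_unused i915_runtime_suspend_example(struct drm_i915_private *dev_priv)
{
	/* Quiesce interrupt handling before powering the device down. */
	intel_runtime_pm_disable_interrupts(dev_priv);

	/* ... save hardware state and enter the low-power state ... */

	return 0;
}

static int __maybe_unused i915_runtime_resume_example(struct drm_i915_private *dev_priv)
{
	/* ... power the device back up and restore hardware state ... */

	/* Re-run the preinstall/postinstall hooks to rearm interrupts. */
	intel_runtime_pm_enable_interrupts(dev_priv);

	return 0;
}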