/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
	u32 val = I915_READ(reg); \
	if (val) { \
		WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
		     (reg), val); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
	} \
} while (0)

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

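/*
 * Register roles behind the helpers above: IER enables an interrupt source,
 * IMR masks it from reaching the CPU, and IIR latches which sources have
 * fired. IIR is written twice on reset because, as noted above, it can
 * queue up two events.
 */
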
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
				  uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_disable_pm_irq(dev_priv, mask);
}

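/*
 * The double IIR write in gen6_reset_rps_interrupts() below mirrors the
 * double clear in the reset macros above (the IIR can queue up two events).
 */
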
void gen6_reset_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg = gen6_pm_iir(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	POSTING_READ(reg);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);

	WARN_ON(dev_priv->rps.pm_iir);
	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
				dev_priv->pm_rps_events);
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}

u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
{
	/*
	 * SNB,IVB can hard hang, and VLV,CHV may hard hang, on a looping
	 * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
		mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_INFO(dev_priv)->gen >= 8)
		mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;

	return mask;
}

void gen6_disable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.interrupts_enabled = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	cancel_work_sync(&dev_priv->rps.work);

	spin_lock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));

	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
				~dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);

	synchronize_irq(dev->irq);
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}

void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
	const struct drm_display_mode *mode =
		&intel_crtc->config->base.adjusted_mode;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	return I915_READ(reg);
}

/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))

static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev))
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config->base.adjusted_mode;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	return ret;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->state->enable) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc,
						     &to_intel_crtc(crtc)->config->base.adjusted_mode);
}

static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      connector->name,
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
}

static void i915_digport_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, dig_port_work);
	u32 long_port_mask, short_port_mask;
	struct intel_digital_port *intel_dig_port;
	int i;
	u32 old_bits = 0;

	spin_lock_irq(&dev_priv->irq_lock);
	long_port_mask = dev_priv->long_hpd_port_mask;
	dev_priv->long_hpd_port_mask = 0;
	short_port_mask = dev_priv->short_hpd_port_mask;
	dev_priv->short_hpd_port_mask = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	for (i = 0; i < I915_MAX_PORTS; i++) {
		bool valid = false;
		bool long_hpd = false;
		intel_dig_port = dev_priv->hpd_irq_port[i];
		if (!intel_dig_port || !intel_dig_port->hpd_pulse)
			continue;

		if (long_port_mask & (1 << i))  {
			valid = true;
			long_hpd = true;
		} else if (short_port_mask & (1 << i))
			valid = true;

		if (valid) {
			enum irqreturn ret;

			ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
			if (ret == IRQ_NONE) {
				/* fall back to old school hpd */
				old_bits |= (1 << intel_dig_port->base.hpd_pin);
			}
		}
	}

	if (old_bits) {
		spin_lock_irq(&dev_priv->irq_lock);
		dev_priv->hpd_event_bits |= old_bits;
		spin_unlock_irq(&dev_priv->irq_lock);
		schedule_work(&dev_priv->hotplug_work);
	}
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

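/*
 * When an HPD pin is disabled after an interrupt storm, the hotplug work
 * below arms hotplug_reenable_work with this delay (2 minutes in ms), after
 * which hotplug detection can be re-enabled for the affected connectors.
 */
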
static void i915_hotplug_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irq(&dev_priv->irq_lock);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 connector->name);
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      connector->name, intel_encoder->hpd_pin);
		}
	}
	 /* if there were no outputs to poll, poll was disabled,
	  * therefore make sure it's enabled when disabling HPD on
	  * some connector */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
				 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irq(&dev_priv->irq_lock);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct intel_engine_cs *ring)
{
	if (!intel_ring_initialized(ring))
		return;

	trace_i915_gem_request_notify(ring);

	wake_up_all(&ring->irq_queue);
}

static void vlv_c0_read(struct drm_i915_private *dev_priv,
			struct intel_rps_ei *ei)
{
	ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}

static bool vlv_c0_above(struct drm_i915_private *dev_priv,
			 const struct intel_rps_ei *old,
			 const struct intel_rps_ei *now,
			 int threshold)
{
	u64 time, c0;

	if (old->cz_clock == 0)
		return false;

	time = now->cz_clock - old->cz_clock;
	time *= threshold * dev_priv->mem_freq;

	/* Workload can be split between render + media, e.g. SwapBuffers
	 * being blitted in X after being rendered in mesa. To account for
	 * this we need to combine both engines into our activity counter.
	 */
	c0 = now->render_c0 - old->render_c0;
	c0 += now->media_c0 - old->media_c0;
	c0 *= 100 * VLV_CZ_CLOCK_TO_MILLI_SEC * 4 / 1000;

	return c0 >= time;
}

void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
	vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
	dev_priv->rps.up_ei = dev_priv->rps.down_ei;
}

static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
		return 0;

	vlv_c0_read(dev_priv, &now);
	if (now.cz_clock == 0)
		return 0;

	if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
		if (!vlv_c0_above(dev_priv,
				  &dev_priv->rps.down_ei, &now,
				  dev_priv->rps.down_threshold))
			events |= GEN6_PM_RP_DOWN_THRESHOLD;
		dev_priv->rps.down_ei = now;
	}

	if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		if (vlv_c0_above(dev_priv,
				 &dev_priv->rps.up_ei, &now,
				 dev_priv->rps.up_threshold))
			events |= GEN6_PM_RP_UP_THRESHOLD;
		dev_priv->rps.up_ei = now;
	}

	return events;
}

static bool any_waiters(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		if (ring->irq_refcount)
			return true;

	return false;
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	bool client_boost;
	int new_delay, adj, min, max;
	u32 pm_iir;

	spin_lock_irq(&dev_priv->irq_lock);
	/* Speed up work cancelation during disabling rps interrupts. */
	if (!dev_priv->rps.interrupts_enabled) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	client_boost = dev_priv->rps.client_boost;
	dev_priv->rps.client_boost = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

	adj = dev_priv->rps.last_adj;
	new_delay = dev_priv->rps.cur_freq;
	min = dev_priv->rps.min_freq_softlimit;
	max = dev_priv->rps.max_freq_softlimit;

	if (client_boost) {
		new_delay = dev_priv->rps.max_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq - adj) {
			new_delay = dev_priv->rps.efficient_freq;
			adj = 0;
		}
	} else if (any_waiters(dev_priv)) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
	} else { /* unknown event */
		adj = 0;
	}

	dev_priv->rps.last_adj = adj;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay += adj;
	new_delay = clamp_t(int, new_delay, min, max);

	intel_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(&dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(&dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(&dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(&dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(&dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}

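/*
 * On gen8 the GT interrupts are spread over four IIR banks: 0 carries the
 * render and blitter rings, 1 the two video rings, 2 the PM/RPS events and
 * 3 the video enhancement ring. The handler below walks whichever banks the
 * master control register flags as pending.
 */
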
static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(0));
		if (tmp) {
			I915_WRITE_FW(GEN8_GT_IIR(0), tmp);
			ret = IRQ_HANDLED;

			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
				intel_lrc_irq_handler(&dev_priv->ring[RCS]);
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
				notify_ring(&dev_priv->ring[RCS]);

			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
				intel_lrc_irq_handler(&dev_priv->ring[BCS]);
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
				notify_ring(&dev_priv->ring[BCS]);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(1));
		if (tmp) {
			I915_WRITE_FW(GEN8_GT_IIR(1), tmp);
			ret = IRQ_HANDLED;

			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
				intel_lrc_irq_handler(&dev_priv->ring[VCS]);
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
				notify_ring(&dev_priv->ring[VCS]);

			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
				intel_lrc_irq_handler(&dev_priv->ring[VCS2]);
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
				notify_ring(&dev_priv->ring[VCS2]);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(3));
		if (tmp) {
			I915_WRITE_FW(GEN8_GT_IIR(3), tmp);
			ret = IRQ_HANDLED;

			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
				intel_lrc_irq_handler(&dev_priv->ring[VECS]);
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
				notify_ring(&dev_priv->ring[VECS]);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	if (master_ctl & GEN8_GT_PM_IRQ) {
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(2));
		if (tmp & dev_priv->pm_rps_events) {
			I915_WRITE_FW(GEN8_GT_IIR(2),
				      tmp & dev_priv->pm_rps_events);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(dev_priv, tmp);
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	return ret;
}

#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

static int pch_port_to_hotplug_shift(enum port port)
{
	switch (port) {
	case PORT_A:
	case PORT_E:
	default:
		return -1;
	case PORT_B:
		return 0;
	case PORT_C:
		return 8;
	case PORT_D:
		return 16;
	}
}

static int i915_port_to_hotplug_shift(enum port port)
{
	switch (port) {
	case PORT_A:
	case PORT_E:
	default:
		return -1;
	case PORT_B:
		return 17;
	case PORT_C:
		return 19;
	case PORT_D:
		return 21;
	}
}

static enum port get_port_from_pin(enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_B:
		return PORT_B;
	case HPD_PORT_C:
		return PORT_C;
	case HPD_PORT_D:
		return PORT_D;
	default:
		return PORT_A; /* no hpd */
	}
}

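/*
 * Storm detection below counts hotplug interrupts per pin: more than
 * HPD_STORM_THRESHOLD interrupts within HPD_STORM_DETECT_PERIOD ms marks the
 * pin HPD_MARK_DISABLED and re-runs hpd_irq_setup, so the pin stops
 * interrupting until it is re-enabled later.
 */
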
static void intel_hpd_irq_handler(struct drm_device *dev,
				  u32 hotplug_trigger,
				  u32 dig_hotplug_reg,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;
	enum port port;
	bool storm_detected = false;
	bool queue_dig = false, queue_hp = false;
	u32 dig_shift;
	u32 dig_port_mask = 0;

	if (!hotplug_trigger)
		return;

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg);

	spin_lock(&dev_priv->irq_lock);
	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (!(hpd[i] & hotplug_trigger))
			continue;

		port = get_port_from_pin(i);
		if (port && dev_priv->hpd_irq_port[port]) {
			bool long_hpd;

			if (!HAS_GMCH_DISPLAY(dev_priv)) {
				dig_shift = pch_port_to_hotplug_shift(port);
				long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			} else {
				dig_shift = i915_port_to_hotplug_shift(port);
				long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			}

			DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
					 port_name(port),
					 long_hpd ? "long" : "short");
			/* for long HPD pulses we want to have the digital queue happen,
			   but we still want HPD storm detection to function. */
			if (long_hpd) {
				dev_priv->long_hpd_port_mask |= (1 << port);
				dig_port_mask |= hpd[i];
			} else {
				/* for short HPD just trigger the digital queue */
				dev_priv->short_hpd_port_mask |= (1 << port);
				hotplug_trigger &= ~hpd[i];
			}
			queue_dig = true;
		}
	}

	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (hpd[i] & hotplug_trigger &&
		    dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
			/*
			 * On GMCH platforms the interrupt mask bits only
			 * prevent irq generation, not the setting of the
			 * hotplug bits itself. So only WARN about unexpected
			 * interrupts on saner platforms.
			 */
			WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
				  "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
				  hotplug_trigger, i, hpd[i]);

			continue;
		}

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		if (!(dig_port_mask & hpd[i])) {
			dev_priv->hpd_event_bits |= (1 << i);
			queue_hp = true;
		}

		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
				      dev_priv->hpd_stats[i].hpd_cnt);
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock(&dev_priv->irq_lock);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	if (queue_dig)
		queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work);
	if (queue_hp)
		schedule_work(&dev_priv->hotplug_work);
}

static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_DEBUG_KMS("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif

static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t res1, res2;

	if (INTEL_INFO(dev)->gen >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
			queue_work(dev_priv->wq, &dev_priv->rps.work);
		}
		spin_unlock(&dev_priv->irq_lock);
	}

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return;

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}

static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
{
	if (!drm_handle_vblank(dev, pipe))
		return false;

	return true;
}

static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pipe_stats[I915_MAX_PIPES] = { };
	int pipe;

	spin_lock(&dev_priv->irq_lock);
	for_each_pipe(dev_priv, pipe) {
		int reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}
	spin_unlock(&dev_priv->irq_lock);

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip(dev, pipe);
		}

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev);
}

static void i9xx_hpd_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

	if (hotplug_status) {
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
		/*
		 * Make sure hotplug status is cleared before we clear IIR, or else we
		 * may miss hotplug events.
		 */
		POSTING_READ(PORT_HOTPLUG_STAT);

		if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

			intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
		} else {
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
		}

		if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
		    hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev);
	}
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	while (true) {
		/* Find, clear, then process each source of interrupt */

		gt_iir = I915_READ(GTIIR);
		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);

		pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		iir = I915_READ(VLV_IIR);
		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		if (gt_iir)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);
		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);
	}

out:
	return ret;
}

static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl, iir;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	for (;;) {
		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		I915_WRITE(GEN8_MASTER_IRQ, 0);

		/* Find, clear, then process each source of interrupt */

		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		gen8_gt_irq_handler(dev_priv, master_ctl);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);

		I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
		POSTING_READ(GEN8_MASTER_IRQ);
	}

	return ret;
}

static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
	u32 dig_hotplug_reg;

	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
}

static void ivb_err_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev))
				ivb_pipe_crc_irq_handler(dev, pipe);
			else
				hsw_pipe_crc_irq_handler(dev, pipe);
		}
	}

	I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);

	I915_WRITE(SERR_INT, serr_int);
}

static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
	u32 dig_hotplug_reg;

	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev);
}

static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe) &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* should clear PCH hotplug event before clearing CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_rps_change_irq_handler(dev);
}

static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev);

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}
/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
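/*
 * Schematically, the sequence above looks like the following (illustration
 * only, not driver code; the register names are the ones used by the
 * ironlake handler below):
 *
 *	de_ier = I915_READ(DEIER);
 *	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);	// step 1
 *	iir = I915_READ(DEIIR);					// step 2
 *	I915_WRITE(DEIIR, iir);					// step 3
 *	... handle the bits that were set in iir ...		// step 4
 *	I915_WRITE(DEIER, de_ier);				// step 5
 */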
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* We get interrupts on unclaimed registers, so check for this before we
	 * do any I915_{READ,WRITE}. */
	intel_uncore_check_errors(dev);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(dev)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	/* Find, clear, then process each source of interrupt */

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
		if (INTEL_INFO(dev)->gen >= 6)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		else
			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
		if (INTEL_INFO(dev)->gen >= 7)
			ivb_display_irq_handler(dev, de_iir);
		else
			ilk_display_irq_handler(dev, de_iir);
	}

	if (INTEL_INFO(dev)->gen >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(dev_priv, pm_iir);
		}
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

	return ret;
}
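/*
 * Note on the return values used by the interrupt handlers in this file:
 * IRQ_NONE tells the kernel core the interrupt was not ours (no IIR source
 * claimed it), while IRQ_HANDLED is returned once at least one source was
 * found, cleared and processed. This is standard irqreturn_t usage rather
 * than anything i915-specific.
 */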
static void bxt_hpd_handler(struct drm_device *dev, uint32_t iir_status)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t hp_control;
	uint32_t hp_trigger;

	/* Get the status */
	hp_trigger = iir_status & BXT_DE_PORT_HOTPLUG_MASK;
	hp_control = I915_READ(BXT_HOTPLUG_CTL);

	/* Hotplug not enabled ? */
	if (!(hp_control & BXT_HOTPLUG_CTL_MASK)) {
		DRM_ERROR("Interrupt when HPD disabled\n");
		return;
	}

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
		hp_control & BXT_HOTPLUG_CTL_MASK);

	/* Check for HPD storm and schedule bottom half */
	intel_hpd_irq_handler(dev, hp_trigger, hp_control, hpd_bxt);

	/*
	 * FIXME: Save the hot plug status for bottom half before
	 * clearing the sticky status bits, else the status will be
	 * lost.
	 */

	/* Clear sticky bits in hpd status */
	I915_WRITE(BXT_HOTPLUG_CTL, hp_control);
}
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl;
	irqreturn_t ret = IRQ_NONE;
	uint32_t tmp = 0;
	enum pipe pipe;
	u32 aux_mask = GEN8_AUX_CHANNEL_A;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	if (IS_GEN9(dev))
		aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
			GEN9_AUX_CHANNEL_D;

	master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
	if (!master_ctl)
		return IRQ_NONE;

	I915_WRITE_FW(GEN8_MASTER_IRQ, 0);

	/* Find, clear, then process each source of interrupt */

	ret = gen8_gt_irq_handler(dev_priv, master_ctl);

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		tmp = I915_READ(GEN8_DE_MISC_IIR);
		if (tmp) {
			I915_WRITE(GEN8_DE_MISC_IIR, tmp);
			ret = IRQ_HANDLED;
			if (tmp & GEN8_DE_MISC_GSE)
				intel_opregion_asle_intr(dev);
			else
				DRM_ERROR("Unexpected DE Misc interrupt\n");
		}
		else
			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		tmp = I915_READ(GEN8_DE_PORT_IIR);
		if (tmp) {
			bool found = false;

			I915_WRITE(GEN8_DE_PORT_IIR, tmp);
			ret = IRQ_HANDLED;

			if (tmp & aux_mask) {
				dp_aux_irq_handler(dev);
				found = true;
			}

			if (IS_BROXTON(dev) && tmp & BXT_DE_PORT_HOTPLUG_MASK) {
				bxt_hpd_handler(dev, tmp);
				found = true;
			}

			if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_GMBUS)) {
				gmbus_irq_handler(dev);
				found = true;
			}

			if (!found)
				DRM_ERROR("Unexpected DE Port interrupt\n");
		}
		else
			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
	}

	for_each_pipe(dev_priv, pipe) {
		uint32_t pipe_iir, flip_done = 0, fault_errors = 0;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (pipe_iir) {
			ret = IRQ_HANDLED;
			I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);

			if (pipe_iir & GEN8_PIPE_VBLANK &&
			    intel_pipe_handle_vblank(dev, pipe))
				intel_check_page_flip(dev, pipe);

			if (IS_GEN9(dev))
				flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
			else
				flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;

			if (flip_done) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip_plane(dev, pipe);
			}

			if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
				hsw_pipe_crc_irq_handler(dev, pipe);

			if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);

			if (IS_GEN9(dev))
				fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
			else
				fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;

			if (fault_errors)
				DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
					  pipe_name(pipe),
					  pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
		} else
			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
	}

	if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
	    master_ctl & GEN8_DE_PCH_IRQ) {
		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		u32 pch_iir = I915_READ(SDEIIR);
		if (pch_iir) {
			I915_WRITE(SDEIIR, pch_iir);
			ret = IRQ_HANDLED;
			cpt_irq_handler(dev, pch_iir);
		} else
			DRM_ERROR("The master control interrupt lied (SDE)!\n");
	}

	I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ_FW(GEN8_MASTER_IRQ);

	return ret;
}
static void i915_error_wake_up(struct drm_i915_private *dev_priv,
			       bool reset_completed)
{
	struct intel_engine_cs *ring;
	int i;

	/*
	 * Notify all waiters for GPU completion events that reset state has
	 * been changed, and that they need to restart their wait after
	 * checking for potential errors (and bail out to drop locks if there is
	 * a gpu reset pending so that i915_error_work_func can acquire them).
	 */

	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
	for_each_ring(ring, dev_priv, i)
		wake_up_all(&ring->irq_queue);

	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
	wake_up_all(&dev_priv->pending_flip_queue);

	/*
	 * Signal tasks blocked in i915_gem_wait_for_error that the pending
	 * reset state is cleared.
	 */
	if (reset_completed)
		wake_up_all(&dev_priv->gpu_error.reset_queue);
}
/**
 * i915_reset_and_wakeup - do process context error handling work
 * @dev: drm device
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_reset_and_wakeup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_gpu_error *error = &dev_priv->gpu_error;
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
	int ret;

	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);

	/*
	 * Note that there's only one work item which does gpu resets, so we
	 * need not worry about concurrent gpu resets potentially incrementing
	 * error->reset_counter twice. We only need to take care of another
	 * racing irq/hangcheck declaring the gpu dead for a second time. A
	 * quick check for that is good enough: schedule_work ensures the
	 * correct ordering between hang detection and this work item, and since
	 * the reset in-progress bit is only ever set by code outside of this
	 * work we don't need to worry about any other races.
	 */
	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
				   reset_event);

		/*
		 * In most cases it's guaranteed that we get here with an RPM
		 * reference held, for example because there is a pending GPU
		 * request that won't finish until the reset is done. This
		 * isn't the case at least when we get here by doing a
		 * simulated reset via debugfs, so get an RPM reference.
		 */
		intel_runtime_pm_get(dev_priv);

		intel_prepare_reset(dev);

		/*
		 * All state reset _must_ be completed before we update the
		 * reset counter, for otherwise waiters might miss the reset
		 * pending state and not properly drop locks, resulting in
		 * deadlocks with the reset work.
		 */
		ret = i915_reset(dev);

		intel_finish_reset(dev);

		intel_runtime_pm_put(dev_priv);

		if (ret == 0) {
			/*
			 * After all the gem state is reset, increment the reset
			 * counter and wake up everyone waiting for the reset to
			 * complete.
			 *
			 * Since unlock operations are a one-sided barrier only,
			 * we need to insert a barrier here to order any seqno
			 * updates before
			 * the counter increment.
			 */
			smp_mb__before_atomic();
			atomic_inc(&dev_priv->gpu_error.reset_counter);

			kobject_uevent_env(&dev->primary->kdev->kobj,
					   KOBJ_CHANGE, reset_done_event);
		} else {
			atomic_set_mask(I915_WEDGED, &error->reset_counter);
		}

		/*
		 * Note: The wake_up also serves as a memory barrier so that
		 * waiters see the update value of the reset counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, true);
	}
}
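/*
 * Summary of the uevent sequence in i915_reset_and_wakeup() above, derived
 * from the code itself: ERROR=1 is always sent first; if a reset is actually
 * attempted, RESET=1 follows, and ERROR=0 is sent only when the reset
 * succeeds. A failed reset instead marks the GPU as terminally wedged.
 */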
static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev, instdone);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(dev_priv, pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}
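/*
 * Note on the tail of i915_report_and_clear_eir() above: EIR is
 * write-to-clear, so it is read back after the write; any bits that remain
 * set are considered stuck and are masked off in EMR so they stop raising
 * further error interrupts.
 */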
/**
 * i915_handle_error - handle a gpu error
 * @dev: drm device
 *
 * Do some basic checking of register state at error time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged,
		       const char *fmt, ...)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	va_list args;
	char error_msg[80];

	va_start(args, fmt);
	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
	va_end(args);

	i915_capture_error_state(dev, wedged, error_msg);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
				&dev_priv->gpu_error.reset_counter);

		/*
		 * Wakeup waiting processes so that the reset function
		 * i915_reset_and_wakeup doesn't deadlock trying to grab
		 * various locks. By bumping the reset counter first, the woken
		 * processes will see a reset in progress and back off,
		 * releasing their locks and then wait for the reset completion.
		 * We must do this for _all_ gpu waiters that might hold locks
		 * that the reset work needs to acquire.
		 *
		 * Note: The wake_up serves as the required memory barrier to
		 * ensure that the waiters see the updated value of the reset
		 * counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, false);
	}

	i915_reset_and_wakeup(dev);
}
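/*
 * Typical callers elsewhere in this file pass wedged=false for recoverable
 * kicks and wedged=true when a ring is declared hung, roughly:
 *
 *	i915_handle_error(dev, false, "Kicking stuck wait on %s", ring->name);
 *	i915_handle_error(dev, true, "Ring hung");
 *
 * (The ring-name argument in the first call is inferred from the format
 * string used by ring_stuck() below.)
 */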
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_STATUS);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
						     DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int gen8_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
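/*
 * The *_enable_vblank()/*_disable_vblank() pairs above and below are the
 * per-platform callbacks wired into the drm_driver enable_vblank/
 * disable_vblank hooks; as the comments note, the generic DRM code hands in
 * the crtc number, which this driver treats as a pipe index.
 */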
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_STATUS |
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
						     DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void gen8_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static bool
ring_idle(struct intel_engine_cs *ring, u32 seqno)
{
	return (list_empty(&ring->request_list) ||
		i915_seqno_passed(seqno, ring->last_submitted_seqno));
}
static bool
ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
{
	if (INTEL_INFO(dev)->gen >= 8) {
		return (ipehr >> 23) == 0x1c;
	} else {
		ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
		return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
				 MI_SEMAPHORE_REGISTER);
	}
}
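/*
 * For orientation: on gen8+ the check above compares the opcode field of the
 * command header (bits 31:23) against the MI_SEMAPHORE_WAIT opcode, while on
 * earlier gens a semaphore wait is an MI_SEMAPHORE_MBOX with the COMPARE and
 * REGISTER bits set, so the sync-target bits are masked off before comparing.
 * The gen8 opcode value is stated here from the MI command encoding rather
 * than from anything visible in this file.
 */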
static struct intel_engine_cs *
semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_engine_cs *signaller;
	int i;

	if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
		for_each_ring(signaller, dev_priv, i) {
			if (ring == signaller)
				continue;

			if (offset == signaller->semaphore.signal_ggtt[ring->id])
				return signaller;
		}
	} else {
		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;

		for_each_ring(signaller, dev_priv, i) {
			if(ring == signaller)
				continue;

			if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
				return signaller;
		}
	}

	DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
		  ring->id, ipehr, offset);

	return NULL;
}
static struct intel_engine_cs *
semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 cmd, ipehr, head;
	u64 offset = 0;
	int i, backwards;

	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
	if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
		return NULL;

	/*
	 * HEAD is likely pointing to the dword after the actual command,
	 * so scan backwards until we find the MBOX. But limit it to just 3
	 * or 4 dwords depending on the semaphore wait command size.
	 * Note that we don't care about ACTHD here since that might
	 * point at a batch, and semaphores are always emitted into the
	 * ringbuffer itself.
	 */
	head = I915_READ_HEAD(ring) & HEAD_ADDR;
	backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;

	for (i = backwards; i; --i) {
		/*
		 * Be paranoid and presume the hw has gone off into the wild -
		 * our ring is smaller than what the hardware (and hence
		 * HEAD_ADDR) allows. Also handles wrap-around.
		 */
		head &= ring->buffer->size - 1;

		/* This here seems to blow up */
		cmd = ioread32(ring->buffer->virtual_start + head);
		if (cmd == ipehr)
			break;

		head -= 4;
	}

	if (!i)
		return NULL;

	*seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
	if (INTEL_INFO(ring->dev)->gen >= 8) {
		offset = ioread32(ring->buffer->virtual_start + head + 12);
		offset <<= 32;
		offset |= ioread32(ring->buffer->virtual_start + head + 8);
	}

	return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
}
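/*
 * Layout assumed by the backwards scan above, counting in bytes from the
 * matched semaphore command header at 'head': +4 holds the seqno being
 * waited for, and on gen8+ the 64-bit semaphore address follows at +8 (low
 * dword) and +12 (high dword), which is how 'offset' is assembled before it
 * is handed to semaphore_wait_to_signaller_ring().
 */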
static int semaphore_passed(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_engine_cs *signaller;
	u32 seqno;

	ring->hangcheck.deadlock++;

	signaller = semaphore_waits_for(ring, &seqno);
	if (signaller == NULL)
		return -1;

	/* Prevent pathological recursion due to driver bugs */
	if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
		return -1;

	if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
		return 1;

	/* cursory check for an unkickable deadlock */
	if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
	    semaphore_passed(signaller) < 0)
		return -1;

	return 0;
}
static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		ring->hangcheck.deadlock = 0;
}
static enum intel_ring_hangcheck_action
ring_stuck(struct intel_engine_cs *ring, u64 acthd)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	if (acthd != ring->hangcheck.acthd) {
		if (acthd > ring->hangcheck.max_acthd) {
			ring->hangcheck.max_acthd = acthd;
			return HANGCHECK_ACTIVE;
		}

		return HANGCHECK_ACTIVE_LOOP;
	}

	if (IS_GEN2(dev))
		return HANGCHECK_HUNG;

	/* Is the chip hanging on a WAIT_FOR_EVENT?
	 * If so we can simply poke the RB_WAIT bit
	 * and break the hang. This should work on
	 * all but the second generation chipsets.
	 */
	tmp = I915_READ_CTL(ring);
	if (tmp & RING_WAIT) {
		i915_handle_error(dev, false,
				  "Kicking stuck wait on %s",
				  ring->name);
		I915_WRITE_CTL(ring, tmp);
		return HANGCHECK_KICK;
	}

	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
		switch (semaphore_passed(ring)) {
		default:
			return HANGCHECK_HUNG;
		case 1:
			i915_handle_error(dev, false,
					  "Kicking stuck semaphore on %s",
					  ring->name);
			I915_WRITE_CTL(ring, tmp);
			return HANGCHECK_KICK;
		case 0:
			return HANGCHECK_WAIT;
		}
	}

	return HANGCHECK_HUNG;
}
/*
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. We keep track per ring seqno progress and
 * if there is no progress, the hangcheck score for that ring is increased.
 * Further, acthd is inspected to see if the ring is stuck. In the stuck case
 * we kick the ring. If we see no progress on three subsequent calls
 * we assume the chip is wedged and try to fix it by resetting the chip.
 */
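/*
 * For orientation only (the exact values are defined in the function below
 * and in the driver headers of this era, so treat them as approximate): the
 * per-sample increments are BUSY=1, KICK=5 and HUNG=20, and a ring is
 * reported hung once its score reaches HANGCHECK_SCORE_RING_HUNG, which is
 * what the "three subsequent calls" wording above refers to in practice.
 */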
static void i915_hangcheck_elapsed(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv),
			     gpu_error.hangcheck_work.work);
	struct drm_device *dev = dev_priv->dev;
	struct intel_engine_cs *ring;
	int i;
	int busy_count = 0, rings_hung = 0;
	bool stuck[I915_NUM_RINGS] = { 0 };
#define BUSY 1
#define KICK 5
#define HUNG 20

	if (!i915.enable_hangcheck)
		return;

	for_each_ring(ring, dev_priv, i) {
		u64 acthd;
		u32 seqno;
		bool busy = true;

		semaphore_clear_deadlocks(dev_priv);

		seqno = ring->get_seqno(ring, false);
		acthd = intel_ring_get_active_head(ring);

		if (ring->hangcheck.seqno == seqno) {
			if (ring_idle(ring, seqno)) {
				ring->hangcheck.action = HANGCHECK_IDLE;

				if (waitqueue_active(&ring->irq_queue)) {
					/* Issue a wake-up to catch stuck h/w. */
					if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
						if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
							DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
								  ring->name);
						else
							DRM_INFO("Fake missed irq on %s\n",
								 ring->name);
						wake_up_all(&ring->irq_queue);
					}
					/* Safeguard against driver failure */
					ring->hangcheck.score += BUSY;
				} else
					busy = false;
			} else {
				/* We always increment the hangcheck score
				 * if the ring is busy and still processing
				 * the same request, so that no single request
				 * can run indefinitely (such as a chain of
				 * batches). The only time we do not increment
				 * the hangcheck score on this ring, if this
				 * ring is in a legitimate wait for another
				 * ring. In that case the waiting ring is a
				 * victim and we want to be sure we catch the
				 * right culprit. Then every time we do kick
				 * the ring, add a small increment to the
				 * score so that we can catch a batch that is
				 * being repeatedly kicked and so responsible
				 * for stalling the machine.
				 */
				ring->hangcheck.action = ring_stuck(ring,
								    acthd);

				switch (ring->hangcheck.action) {
				case HANGCHECK_IDLE:
				case HANGCHECK_WAIT:
				case HANGCHECK_ACTIVE:
					break;
				case HANGCHECK_ACTIVE_LOOP:
					ring->hangcheck.score += BUSY;
					break;
				case HANGCHECK_KICK:
					ring->hangcheck.score += KICK;
					break;
				case HANGCHECK_HUNG:
					ring->hangcheck.score += HUNG;
					stuck[i] = true;
					break;
				}
			}
		} else {
			ring->hangcheck.action = HANGCHECK_ACTIVE;

			/* Gradually reduce the count so that we catch DoS
			 * attempts across multiple batches.
			 */
			if (ring->hangcheck.score > 0)
				ring->hangcheck.score--;

			ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
		}

		ring->hangcheck.seqno = seqno;
		ring->hangcheck.acthd = acthd;
		busy_count += busy;
	}

	for_each_ring(ring, dev_priv, i) {
		if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
			DRM_INFO("%s on %s\n",
				 stuck[i] ? "stuck" : "no progress",
				 ring->name);
			rings_hung++;
		}
	}

	if (rings_hung)
		return i915_handle_error(dev, true, "Ring hung");

	if (busy_count)
		/* Reset timer case chip hangs without another request
		 * being added */
		i915_queue_hangcheck(dev);
}
void i915_queue_hangcheck(struct drm_device *dev)
{
	struct i915_gpu_error *e = &to_i915(dev)->gpu_error;

	if (!i915.enable_hangcheck)
		return;

	/* Don't continually defer the hangcheck so that it is always run at
	 * least once after work has been scheduled on any ring. Otherwise,
	 * we will ignore a hung ring if a second ring is kept busy.
	 */
	queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
			   round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
}
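/*
 * DRM_I915_HANGCHECK_JIFFIES is defined elsewhere in the driver; it has
 * historically corresponded to a period on the order of 1.5 seconds, which
 * is the sampling interval the hangcheck scoring comments above assume.
 * Treat the exact period as an assumption here rather than something this
 * excerpt establishes.
 */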
static void ibx_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_NOP(dev))
		return;

	GEN5_IRQ_RESET(SDE);

	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
		I915_WRITE(SERR_INT, 0xffffffff);
}

/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
 * instead we unconditionally enable all PCH interrupt sources here, but then
 * only unmask them as needed with SDEIMR.
 *
 * This function needs to be called before interrupts are enabled.
 */
static void ibx_irq_pre_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_NOP(dev))
		return;

	WARN_ON(I915_READ(SDEIER) != 0);
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);
}

static void gen5_gt_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	GEN5_IRQ_RESET(GT);
	if (INTEL_INFO(dev)->gen >= 6)
		GEN5_IRQ_RESET(GEN6_PM);
}

/* drm_dma.h hooks
*/
static void ironlake_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(HWSTAM, 0xffffffff);

	GEN5_IRQ_RESET(DE);
	if (IS_GEN7(dev))
		I915_WRITE(GEN7_ERR_INT, 0xffffffff);

	gen5_gt_irq_reset(dev);

	ibx_irq_reset(dev);
}
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	GEN5_IRQ_RESET(VLV_);
}

static void valleyview_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* VLV magic */
	I915_WRITE(VLV_IMR, 0);
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

	gen5_gt_irq_reset(dev);

	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);

	vlv_display_irq_reset(dev_priv);
}

static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
{
	GEN8_IRQ_RESET_NDX(GT, 0);
	GEN8_IRQ_RESET_NDX(GT, 1);
	GEN8_IRQ_RESET_NDX(GT, 2);
	GEN8_IRQ_RESET_NDX(GT, 3);
}

static void gen8_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);

	GEN5_IRQ_RESET(GEN8_DE_PORT_);
	GEN5_IRQ_RESET(GEN8_DE_MISC_);
	GEN5_IRQ_RESET(GEN8_PCU_);

	if (HAS_PCH_SPLIT(dev))
		ibx_irq_reset(dev);
}
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
				     unsigned int pipe_mask)
{
	uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;

	spin_lock_irq(&dev_priv->irq_lock);
	if (pipe_mask & 1 << PIPE_A)
		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A,
				  dev_priv->de_irq_mask[PIPE_A],
				  ~dev_priv->de_irq_mask[PIPE_A] | extra_ier);
	if (pipe_mask & 1 << PIPE_B)
		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B,
				  dev_priv->de_irq_mask[PIPE_B],
				  ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
	if (pipe_mask & 1 << PIPE_C)
		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C,
				  dev_priv->de_irq_mask[PIPE_C],
				  ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void cherryview_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN5_IRQ_RESET(GEN8_PCU_);

	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);

	vlv_display_irq_reset(dev_priv);
}
static void ibx_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	u32 hotplug_irqs, hotplug, enabled_irqs = 0;

	if (HAS_PCH_IBX(dev)) {
		hotplug_irqs = SDE_HOTPLUG_MASK;
		for_each_intel_encoder(dev, intel_encoder)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
	} else {
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
		for_each_intel_encoder(dev, intel_encoder)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
	}

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec)
	 *
	 * This register is the same on all known PCH chips.
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
static void bxt_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	u32 hotplug_port = 0;
	u32 hotplug_ctrl;

	/* Now, enable HPD */
	for_each_intel_encoder(dev, intel_encoder) {
		if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark
				== HPD_ENABLED)
			hotplug_port |= hpd_bxt[intel_encoder->hpd_pin];
	}

	/* Mask all HPD control bits */
	hotplug_ctrl = I915_READ(BXT_HOTPLUG_CTL) & ~BXT_HOTPLUG_CTL_MASK;

	/* Enable requested port in hotplug control */
	/* TODO: implement (short) HPD support on port A */
	WARN_ON_ONCE(hotplug_port & BXT_DE_PORT_HP_DDIA);
	if (hotplug_port & BXT_DE_PORT_HP_DDIB)
		hotplug_ctrl |= BXT_DDIB_HPD_ENABLE;
	if (hotplug_port & BXT_DE_PORT_HP_DDIC)
		hotplug_ctrl |= BXT_DDIC_HPD_ENABLE;
	I915_WRITE(BXT_HOTPLUG_CTL, hotplug_ctrl);

	/* Unmask DDI hotplug in IMR */
	hotplug_ctrl = I915_READ(GEN8_DE_PORT_IMR) & ~hotplug_port;
	I915_WRITE(GEN8_DE_PORT_IMR, hotplug_ctrl);

	/* Enable DDI hotplug in IER */
	hotplug_ctrl = I915_READ(GEN8_DE_PORT_IER) | hotplug_port;
	I915_WRITE(GEN8_DE_PORT_IER, hotplug_ctrl);
	POSTING_READ(GEN8_DE_PORT_IER);
}
static void ibx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 mask;

	if (HAS_PCH_NOP(dev))
		return;

	if (HAS_PCH_IBX(dev))
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	else
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;

	GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
	I915_WRITE(SDEIMR, ~mask);
}

static void gen5_gt_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pm_irqs, gt_irqs;

	pm_irqs = gt_irqs = 0;

	dev_priv->gt_irq_mask = ~0;
	if (HAS_L3_DPF(dev)) {
		/* L3 parity interrupt is always unmasked. */
		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
		gt_irqs |= GT_PARITY_ERROR(dev);
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (IS_GEN5(dev)) {
		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
			   ILK_BSD_USER_INTERRUPT;
	} else {
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
	}

	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);

	if (INTEL_INFO(dev)->gen >= 6) {
		/*
		 * RPS interrupts will get enabled/disabled on demand when RPS
		 * itself is enabled/disabled.
		 */
		if (HAS_VEBOX(dev))
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;

		dev_priv->pm_irq_mask = 0xffffffff;
		GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
	}
}
static int ironlake_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 display_mask, extra_mask;

	if (INTEL_INFO(dev)->gen >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
				DE_PLANEB_FLIP_DONE_IVB |
				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
				DE_AUX_CHANNEL_A |
				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
				DE_POISON);
		extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
				DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
	}

	dev_priv->irq_mask = ~display_mask;

	I915_WRITE(HWSTAM, 0xeffe);

	ibx_irq_pre_postinstall(dev);

	GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);

	gen5_gt_irq_postinstall(dev);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Enable PCU event interrupts
		 *
		 * spinlocking not required here for correctness since interrupt
		 * setup is guaranteed to run in single-threaded context. But we
		 * need it to make the assert_spin_locked happy. */
		spin_lock_irq(&dev_priv->irq_lock);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irq(&dev_priv->irq_lock);
	}

	return 0;
}
static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
{
	u32 pipestat_mask;
	u32 iir_mask;
	enum pipe pipe;

	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
			PIPE_FIFO_UNDERRUN_STATUS;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
	POSTING_READ(PIPESTAT(PIPE_A));

	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
			PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);

	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
	if (IS_CHERRYVIEW(dev_priv))
		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
	dev_priv->irq_mask &= ~iir_mask;

	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	POSTING_READ(VLV_IMR);
}

static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
{
	u32 pipestat_mask;
	u32 iir_mask;
	enum pipe pipe;

	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
	if (IS_CHERRYVIEW(dev_priv))
		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;

	dev_priv->irq_mask |= iir_mask;
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	POSTING_READ(VLV_IIR);

	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
			PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_disable_pipestat(dev_priv, pipe, pipestat_mask);

	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
			PIPE_FIFO_UNDERRUN_STATUS;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
	POSTING_READ(PIPESTAT(PIPE_A));
}
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = true;

	if (intel_irqs_enabled(dev_priv))
		valleyview_display_irqs_install(dev_priv);
}

void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = false;

	if (intel_irqs_enabled(dev_priv))
		valleyview_display_irqs_uninstall(dev_priv);
}

static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
	dev_priv->irq_mask = ~0;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	POSTING_READ(VLV_IMR);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		valleyview_display_irqs_install(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static int valleyview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	vlv_display_irq_postinstall(dev_priv);

	gen5_gt_irq_postinstall(dev);

	/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

	return 0;
}
static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
	/* These are interrupts we'll toggle with the ring mask register */
	uint32_t gt_interrupts[] = {
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
		0,
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
		};

	dev_priv->pm_irq_mask = 0xffffffff;
	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled.
	 */
	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}
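/*
 * Layout note for gen8_gt_irq_postinstall() above: the four array entries
 * map onto the four GT interrupt banks programmed via
 * GEN8_IRQ_INIT_NDX(GT, 0..3) - render/blitter engines in bank 0, the two
 * video command streamers in bank 1, PM/RPS in bank 2 (left fully masked
 * here), and the video enhancement engine in bank 3.
 */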
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
	uint32_t de_pipe_enables;
	int pipe;
	u32 de_port_en = GEN8_AUX_CHANNEL_A;

	if (IS_GEN9(dev_priv)) {
		de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
				  GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		de_port_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
			GEN9_AUX_CHANNEL_D;

		if (IS_BROXTON(dev_priv))
			de_port_en |= BXT_DE_PORT_GMBUS;
	} else
		de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
				  GEN8_DE_PIPE_IRQ_FAULT_ERRORS;

	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
					   GEN8_PIPE_FIFO_UNDERRUN;

	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);

	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_en, de_port_en);
}

static int gen8_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_SPLIT(dev))
		ibx_irq_pre_postinstall(dev);

	gen8_gt_irq_postinstall(dev_priv);
	gen8_de_irq_postinstall(dev_priv);

	if (HAS_PCH_SPLIT(dev))
		ibx_irq_postinstall(dev);

	I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}
static int cherryview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	vlv_display_irq_postinstall(dev_priv);

	gen8_gt_irq_postinstall(dev_priv);

	I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

static void gen8_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	gen8_irq_reset(dev);
}

static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
{
	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		valleyview_display_irqs_uninstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	vlv_display_irq_reset(dev_priv);

	dev_priv->irq_mask = ~0;
}

static void valleyview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	I915_WRITE(VLV_MASTER_IER, 0);

	gen5_gt_irq_reset(dev);

	I915_WRITE(HWSTAM, 0xffffffff);

	vlv_display_irq_uninstall(dev_priv);
}

static void cherryview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN5_IRQ_RESET(GEN8_PCU_);

	vlv_display_irq_uninstall(dev_priv);
}

static void ironlake_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	ironlake_irq_reset(dev);
}
static void i8xx_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}

static int i8xx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}
/*
 * Returns true when a page flip has completed.
 */
static bool i8xx_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ16(ISR) & flip_pending)
		goto check_page_flip;

	intel_prepare_page_flip(dev, plane);
	intel_finish_page_flip(dev, pipe);
	return true;

check_page_flip:
	intel_check_page_flip(dev, pipe);
	return false;
}
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	iir = I915_READ16(IIR);
	if (iir == 0)
		return IRQ_NONE;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff)
				I915_WRITE(reg, pipe_stats[pipe]);
		}
		spin_unlock(&dev_priv->irq_lock);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i8xx_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		iir = new_iir;
	}

	return IRQ_HANDLED;
}
static void i8xx_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}

static void i915_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
static int i915_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_enable_asle_pipestat(dev);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}
/*
 * Returns true when a page flip has completed.
 */
static bool i915_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		goto check_page_flip;

	intel_prepare_page_flip(dev, plane);
	intel_finish_page_flip(dev, pipe);
	return true;

check_page_flip:
	intel_check_page_flip(dev, pipe);
	return false;
}
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (I915_HAS_HOTPLUG(dev) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	return ret;
}
static void i915_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

static void i965_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev);

	return 0;
}
static void i915_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	hotplug_en = I915_READ(PORT_HOTPLUG_EN);
	hotplug_en &= ~HOTPLUG_INT_EN_MASK;
	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	for_each_intel_encoder(dev, intel_encoder)
		if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
			hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
	/* Programming the CRT detection parameters tends
	   to generate a spurious hotplug event about three
	   seconds later.  So just do it once.
	*/
	if (IS_G4X(dev))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
}
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	iir = I915_READ(IIR);

	for (;;) {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[VCS]);

		for_each_pipe(dev_priv, pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * a stray interrupt.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	return ret;
}
static void i965_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}
static void intel_hpd_irq_reenable_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv),
			     hotplug_reenable_work.work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	int i;

	intel_runtime_pm_get(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
		struct drm_connector *connector;

		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
			continue;

		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;

		list_for_each_entry(connector, &mode_config->connector_list, head) {
			struct intel_connector *intel_connector = to_intel_connector(connector);

			if (intel_connector->encoder->hpd_pin == i) {
				if (connector->polled != intel_connector->polled)
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
							 connector->name);
				connector->polled = intel_connector->polled;
				if (!connector->polled)
					connector->polled = DRM_CONNECTOR_POLL_HPD;
			}
		}
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_runtime_pm_put(dev_priv);
}
/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not setup the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
			  i915_hangcheck_elapsed);
	INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
			  intel_hpd_irq_reenable_work);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	if (IS_GEN2(dev_priv)) {
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
	 */
	if (!IS_GEN2(dev_priv))
		dev->vblank_disable_immediate = true;

	dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		if (HAS_PCH_SPLIT(dev))
			dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev_priv)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev_priv)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
		}
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}
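
/*
 * Illustration only (not part of this file): the per-platform hooks selected
 * in intel_irq_init() above are consumed by the DRM core. In this kernel era
 * drm_irq_install() roughly does
 *
 *	dev->driver->irq_preinstall(dev);
 *	request_irq(irq, dev->driver->irq_handler, ...);
 *	dev->driver->irq_postinstall(dev);
 *
 * so intel_irq_init() only has to pick the right function pointers; it does
 * not touch the interrupt line itself.
 */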
/**
 * intel_hpd_init - initializes and enables hpd support
 * @dev_priv: i915 device instance
 *
 * This function enables the hotplug support. It requires that interrupts have
 * already been enabled with intel_irq_init_hw(). From this point on hotplug and
 * poll request can run concurrently to other code, so locking rules must be
 * obeyed.
 *
 * This is a separate step from interrupt enabling to simplify the locking rules
 * in the driver load and resume code.
 */
void intel_hpd_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	int i;

	for (i = 1; i < HPD_NUM_PINS; i++) {
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
	}
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);
		connector->polled = intel_connector->polled;
		if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) &&
		    intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
		if (intel_connector->mst_port)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);
}
/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->pm.irqs_enabled = true;

	return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
}
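
/*
 * Sketch of the intended driver-load ordering, based on the kerneldoc in this
 * file (illustration only, not a verbatim copy of the actual load path):
 *
 *	intel_irq_init(dev_priv);	- work items, timers and vtables
 *	intel_irq_install(dev_priv);	- hardware interrupt enabled
 *	intel_hpd_init(dev_priv);	- hotplug handling enabled last
 */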
/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(dev_priv->dev);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->pm.irqs_enabled = false;
}
/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
	dev_priv->pm.irqs_enabled = false;
	synchronize_irq(dev_priv->dev->irq);
}
/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->pm.irqs_enabled = true;
	dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
	dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
}
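
/*
 * Sketch of how the two runtime helpers above pair up in the suspend/resume
 * paths (illustration only, based on their kerneldoc):
 *
 *	suspend:	intel_runtime_pm_disable_interrupts(dev_priv);
 *	resume:		intel_runtime_pm_enable_interrupts(dev_priv);
 *			intel_hpd_init(dev_priv);
 *
 * The enable path reuses the driver's irq_preinstall/irq_postinstall hooks
 * directly rather than re-registering the interrupt line.
 */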