/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
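
/*
 * Note on the mask helpers below: DEIMR is a mask register, so a set bit
 * disables the corresponding display interrupt. "Enabling" therefore
 * clears bits and "disabling" sets them, and the POSTING_READ flushes the
 * register write so the new mask takes effect immediately.
 */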
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
static inline void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
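
/*
 * The PIPESTAT registers pack interrupt enable bits in the high 16 bits
 * and the matching sticky status bits in the low 16 bits; status bits are
 * acked by writing 1 to them. That is why the enable path below ORs in
 * (mask >> 16): it clears any status that was already pending.
 */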
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != mask) {
		u32 reg = PIPESTAT(pipe);

		dev_priv->pipestat[pipe] |= mask;
		/* Enable the interrupt, clear any pending status */
		I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
		POSTING_READ(reg);
	}
}
void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != 0) {
		u32 reg = PIPESTAT(pipe);

		dev_priv->pipestat[pipe] &= ~mask;
		I915_WRITE(reg, dev_priv->pipestat[pipe]);
		POSTING_READ(reg);
	}
}
/**
 * intel_enable_asle - enable ASLE interrupt for OpRegion
 */
void intel_enable_asle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	/* FIXME: opregion/asle for VLV */
	if (IS_VALLEYVIEW(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	if (HAS_PCH_SPLIT(dev))
		ironlake_enable_display_irq(dev_priv, DE_GSE);
	else {
		i915_enable_pipestat(dev_priv, 1,
				     PIPE_LEGACY_BLC_EVENT_ENABLE);
		if (INTEL_INFO(dev)->gen >= 4)
			i915_enable_pipestat(dev_priv, 0,
					     PIPE_LEGACY_BLC_EVENT_ENABLE);
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
}
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}
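
/* G4x and newer have a dedicated hardware frame counter register
 * (PIPE_FRMCOUNT_GM45), so a single read suffices instead of the split
 * high/low dance above.
 */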
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}
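
/* The scanout position computed below feeds the DRM core helper
 * drm_calc_vbltimestamp_from_scanoutpos(), which converts it into a
 * precise vblank timestamp for userspace.
 */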
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    int *vpos, int *hpos)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	/* Get vtotal. */
	vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = I915_READ(PIPEDSL(pipe));

		/* Decode into vertical scanout position. Don't have
		 * horizontal scanout position.
		 */
		*vpos = position & 0x1fff;
		*hpos = 0;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff);
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* Query vblank area. */
	vbl = I915_READ(VBLANK(pipe));

	/* Test position against vblank region. */
	vbl_start = vbl & 0x1fff;
	vbl_end = (vbl >> 16) & 0x1fff;

	if ((*vpos < vbl_start) || (*vpos > vbl_end))
		in_vbl = false;

	/* Inside "upper part" of vblank area? Apply corrective offset: */
	if (in_vbl && (*vpos >= vbl_start))
		*vpos = *vpos - vtotal;

	/* Readouts valid? */
	if (vbl > 0)
		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}
static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= dev_priv->num_pipe) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc);
}
/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
		if (encoder->hot_plug)
			encoder->hot_plug(encoder);

	mutex_unlock(&mode_config->mutex);

	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}
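
/* Ironlake DRPS: delay values run opposite to frequency, i.e. a
 * numerically smaller delay selects a higher render p-state. That is why
 * max_delay acts as the lower numeric bound in the clamping below.
 */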
static void i915_handle_rps_change(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay = dev_priv->cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->cur_delay != dev_priv->max_delay)
			new_delay = dev_priv->cur_delay - 1;
		if (new_delay < dev_priv->max_delay)
			new_delay = dev_priv->max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->cur_delay != dev_priv->min_delay)
			new_delay = dev_priv->cur_delay + 1;
		if (new_delay > dev_priv->min_delay)
			new_delay = dev_priv->min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->cur_delay = new_delay;

	return;
}
static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring, ring->get_seqno(ring));

	wake_up_all(&ring->irq_queue);
	if (i915_enable_hangcheck) {
		dev_priv->hangcheck_count = 0;
		mod_timer(&dev_priv->hangcheck_timer,
			  jiffies +
			  msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
	}
}
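
/* The interrupt handler only latches the PM IIR bits under rps_lock and
 * masks further PM interrupts; this work item picks the bits up in
 * process context, unmasks GEN6_PMIMR and steps the RPS frequency up or
 * down by one.
 */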
static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps_work);
	u32 pm_iir, pm_imr;
	u8 new_delay;

	spin_lock_irq(&dev_priv->rps_lock);
	pm_iir = dev_priv->pm_iir;
	dev_priv->pm_iir = 0;
	pm_imr = I915_READ(GEN6_PMIMR);
	I915_WRITE(GEN6_PMIMR, 0);
	spin_unlock_irq(&dev_priv->rps_lock);

	if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->dev->struct_mutex);

	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
		new_delay = dev_priv->cur_delay + 1;
	else
		new_delay = dev_priv->cur_delay - 1;

	gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}
/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    parity_error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[5];
	uint32_t misccpctl;
	unsigned long flags;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	error_status = I915_READ(GEN7_L3CDERRST1);
	row = GEN7_PARITY_ERROR_ROW(error_status);
	bank = GEN7_PARITY_ERROR_BANK(error_status);
	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
				    GEN7_L3CDERRST1_ENABLE);
	POSTING_READ(GEN7_L3CDERRST1);

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);

	parity_event[0] = "L3_PARITY_ERROR=1";
	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
	parity_event[4] = NULL;

	kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
			   KOBJ_CHANGE, parity_event);

	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
		  row, bank, subbank);

	kfree(parity_event[3]);
	kfree(parity_event[2]);
	kfree(parity_event[1]);
}
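
/* Called from the GT interrupt handler: mask further parity interrupts
 * and defer the (slow) register reads to ivybridge_parity_work(), which
 * re-enables the interrupt when it is done.
 */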
static void ivybridge_handle_parity_error(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long flags;

	if (!IS_IVYBRIDGE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	queue_work(dev_priv->wq, &dev_priv->parity_error_work);
}
static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
		      GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GEN6_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
		      GT_GEN6_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

	if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
		ivybridge_handle_parity_error(dev);
}
static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
				u32 pm_iir)
{
	unsigned long flags;

	/*
	 * IIR bits should never already be set because IMR should
	 * prevent an interrupt from being shown in IIR. The warning
	 * displays a case where we've unsafely cleared
	 * dev_priv->pm_iir. Although missing an interrupt of the same
	 * type is not a problem, it displays a problem in the logic.
	 *
	 * The mask bit in IMR is cleared by rps_work.
	 */

	spin_lock_irqsave(&dev_priv->rps_lock, flags);
	dev_priv->pm_iir |= pm_iir;
	I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
	POSTING_READ(GEN6_PMIMR);
	spin_unlock_irqrestore(&dev_priv->rps_lock, flags);

	queue_work(dev_priv->wq, &dev_priv->rps_work);
}
static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];
	bool blc_event;

	atomic_inc(&dev_priv->irq_received);

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(dev, pipe);

			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}
		}

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}
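
/* The IBX/CPT south display (PCH) handlers below only decode and log the
 * status bits; hotplug work is queued by their callers, which are also
 * responsible for clearing SDEIIR.
 */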
static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (pch_iir & SDE_AUDIO_POWER_MASK)
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK) >>
				 SDE_AUDIO_POWER_SHIFT);

	if (pch_iir & SDE_GMBUS)
		DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
}
static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
				 SDE_AUDIO_POWER_SHIFT_CPT);

	if (pch_iir & SDE_AUX_MASK_CPT)
		DRM_DEBUG_DRIVER("AUX channel interrupt\n");

	if (pch_iir & SDE_GMBUS_CPT)
		DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));
}
static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	int i;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		snb_gt_irq_handler(dev, dev_priv, gt_iir);
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		if (de_iir & DE_GSE_IVB)
			intel_opregion_gse_intr(dev);

		for (i = 0; i < 3; i++) {
			if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
				drm_handle_vblank(dev, i);
			if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
				intel_prepare_page_flip(dev, i);
				intel_finish_page_flip_plane(dev, i);
			}
		}

		/* check event from PCH */
		if (de_iir & DE_PCH_EVENT_IVB) {
			u32 pch_iir = I915_READ(SDEIIR);

			if (pch_iir & SDE_HOTPLUG_MASK_CPT)
				queue_work(dev_priv->wq, &dev_priv->hotplug_work);
			cpt_irq_handler(dev, pch_iir);

			/* clear PCH hotplug event before clear CPU irq */
			I915_WRITE(SDEIIR, pch_iir);
		}

		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
	}

	pm_iir = I915_READ(GEN6_PMIIR);
	if (pm_iir) {
		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		ret = IRQ_HANDLED;
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);

	return ret;
}
static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}
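
/* ironlake_irq_handler serves both gen5 and gen6: ring interrupts go
 * through ilk_gt_irq_handler() on gen5 and snb_gt_irq_handler() on gen6,
 * and only gen6 has the PM (RPS) interrupt registers.
 */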
static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret = IRQ_NONE;
	u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
	u32 hotplug_mask;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	de_iir = I915_READ(DEIIR);
	gt_iir = I915_READ(GTIIR);
	pch_iir = I915_READ(SDEIIR);
	pm_iir = I915_READ(GEN6_PMIIR);

	if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
	    (!IS_GEN6(dev) || pm_iir == 0))
		goto done;

	if (HAS_PCH_CPT(dev))
		hotplug_mask = SDE_HOTPLUG_MASK_CPT;
	else
		hotplug_mask = SDE_HOTPLUG_MASK;

	ret = IRQ_HANDLED;

	if (IS_GEN5(dev))
		ilk_gt_irq_handler(dev, dev_priv, gt_iir);
	else
		snb_gt_irq_handler(dev, dev_priv, gt_iir);

	if (de_iir & DE_GSE)
		intel_opregion_gse_intr(dev);

	if (de_iir & DE_PIPEA_VBLANK)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK)
		drm_handle_vblank(dev, 1);

	if (de_iir & DE_PLANEA_FLIP_DONE) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip_plane(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip_plane(dev, 1);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		if (pch_iir & hotplug_mask)
			queue_work(dev_priv->wq, &dev_priv->hotplug_work);
		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);
	}

	if (de_iir & DE_PCU_EVENT) {
		I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
		i915_handle_rps_change(dev);
	}

	if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
		gen6_queue_rps_work(dev_priv, pm_iir);

	/* should clear PCH hotplug event before clear CPU irq */
	I915_WRITE(SDEIIR, pch_iir);
	I915_WRITE(GTIIR, gt_iir);
	I915_WRITE(DEIIR, de_iir);
	I915_WRITE(GEN6_PMIIR, pm_iir);

done:
	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);

	return ret;
}
/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_error_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    error_work);
	struct drm_device *dev = dev_priv->dev;
	char *error_event[] = { "ERROR=1", NULL };
	char *reset_event[] = { "RESET=1", NULL };
	char *reset_done_event[] = { "ERROR=0", NULL };

	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);

	if (atomic_read(&dev_priv->mm.wedged)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
		if (!i915_reset(dev)) {
			atomic_set(&dev_priv->mm.wedged, 0);
			kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
		}
		complete_all(&dev_priv->error_completion);
	}
}
#ifdef CONFIG_DEBUG_FS
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
			 struct drm_i915_gem_object *src)
{
	struct drm_i915_error_object *dst;
	int page, page_count;
	u32 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	page_count = src->base.size / PAGE_SIZE;

	dst = kmalloc(sizeof(*dst) + page_count * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	reloc_offset = src->gtt_offset;
	for (page = 0; page < page_count; page++) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (reloc_offset < dev_priv->mm.gtt_mappable_end &&
		    src->has_global_gtt_mapping) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */
			s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else {
			void *s;

			drm_clflush_pages(&src->pages[page], 1);

			s = kmap_atomic(src->pages[page]);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&src->pages[page], 1);
		}
		local_irq_restore(flags);

		dst->pages[page] = d;

		reloc_offset += PAGE_SIZE;
	}
	dst->page_count = page_count;
	dst->gtt_offset = src->gtt_offset;

	return dst;

unwind:
	while (page--)
		kfree(dst->pages[page]);
	kfree(dst);
	return NULL;
}

static void
i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}
void
i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		kfree(error->ring[i].requests);
	}

	kfree(error->active_bo);
	kfree(error->overlay);
	kfree(error);
}
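
/* err->pinned below encodes who holds the pin: 0 = unpinned, 1 = pinned
 * by the kernel, -1 = pinned by userspace via the pin ioctl.
 */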
static void capture_bo(struct drm_i915_error_buffer *err,
		       struct drm_i915_gem_object *obj)
{
	err->size = obj->base.size;
	err->name = obj->base.name;
	err->seqno = obj->last_rendering_seqno;
	err->gtt_offset = obj->gtt_offset;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	err->pinned = 0;
	if (obj->pin_count > 0)
		err->pinned = 1;
	if (obj->user_pin_count > 0)
		err->pinned = -1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->ring = obj->ring ? obj->ring->id : -1;
	err->cache_level = obj->cache_level;
}
static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, mm_list) {
		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}
static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, gtt_list) {
		if (obj->pin_count == 0)
			continue;

		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}
static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Fences */
	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
	case 2:
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		break;
	}
}
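
/* "First" batchbuffer here means the oldest batch on this ring that has
 * not yet been retired by seqno (the active list is kept in submission
 * order), i.e. the most likely candidate for the one that hung the GPU.
 */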
static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
			     struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	u32 seqno;

	if (!ring->get_seqno)
		return NULL;

	seqno = ring->get_seqno(ring);
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		if (obj->ring != ring)
			continue;

		if (i915_seqno_passed(seqno, obj->last_rendering_seqno))
			continue;

		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
			continue;

		/* We need to copy these to an anonymous buffer as the simplest
		 * method to avoid being overwritten by userspace.
		 */
		return i915_error_object_create(dev_priv, obj);
	}

	return NULL;
}
static void i915_record_ring_state(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
		error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
		error->semaphore_mboxes[ring->id][0]
			= I915_READ(RING_SYNC_0(ring->mmio_base));
		error->semaphore_mboxes[ring->id][1]
			= I915_READ(RING_SYNC_1(ring->mmio_base));
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
		error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
		if (ring->id == RCS) {
			error->instdone1 = I915_READ(INSTDONE1);
			error->bbaddr = I915_READ64(BB_ADDR);
		}
	} else {
		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
		error->ipeir[ring->id] = I915_READ(IPEIR);
		error->ipehr[ring->id] = I915_READ(IPEHR);
		error->instdone[ring->id] = I915_READ(INSTDONE);
	}

	error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
	error->seqno[ring->id] = ring->get_seqno(ring);
	error->acthd[ring->id] = intel_ring_get_active_head(ring);
	error->head[ring->id] = I915_READ_HEAD(ring);
	error->tail[ring->id] = I915_READ_TAIL(ring);

	error->cpu_ring_head[ring->id] = ring->head;
	error->cpu_ring_tail[ring->id] = ring->tail;
}
static void i915_gem_record_rings(struct drm_device *dev,
				  struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_request *request;
	int i, count;

	for_each_ring(ring, dev_priv, i) {
		i915_record_ring_state(dev, error, ring);

		error->ring[i].batchbuffer =
			i915_error_first_batchbuffer(dev_priv, ring);

		error->ring[i].ringbuffer =
			i915_error_object_create(dev_priv, ring->obj);

		count = 0;
		list_for_each_entry(request, &ring->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		error->ring[i].requests =
			kmalloc(count*sizeof(struct drm_i915_error_request),
				GFP_ATOMIC);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->tail;
		}
	}
}
/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error.  Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
static void i915_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_i915_error_state *error;
	unsigned long flags;
	int i, pipe;

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	error = dev_priv->first_error;
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);
	if (error)
		return;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
		 dev->primary->index);

	kref_init(&error->ref);
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);
	error->ccid = I915_READ(CCID);

	if (HAS_PCH_SPLIT(dev))
		error->ier = I915_READ(DEIER) | I915_READ(GTIER);
	else if (IS_VALLEYVIEW(dev))
		error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
	else if (IS_GEN2(dev))
		error->ier = I915_READ16(IER);
	else
		error->ier = I915_READ(IER);

	for_each_pipe(pipe)
		error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));

	if (INTEL_INFO(dev)->gen >= 6) {
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

	/* Record buffers on the active and pinned lists. */
	error->active_bo = NULL;
	error->pinned_bo = NULL;

	i = 0;
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
		i++;
	error->active_bo_count = i;
	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list)
		if (obj->pin_count)
			i++;
	error->pinned_bo_count = i - error->active_bo_count;

	error->active_bo = NULL;
	error->pinned_bo = NULL;
	if (i) {
		error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
					   GFP_ATOMIC);
		if (error->active_bo)
			error->pinned_bo =
				error->active_bo + error->active_bo_count;
	}

	if (error->active_bo)
		error->active_bo_count =
			capture_active_bo(error->active_bo,
					  error->active_bo_count,
					  &dev_priv->mm.active_list);

	if (error->pinned_bo)
		error->pinned_bo_count =
			capture_pinned_bo(error->pinned_bo,
					  error->pinned_bo_count,
					  &dev_priv->mm.gtt_list);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	if (dev_priv->first_error == NULL) {
		dev_priv->first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);

	if (error)
		i915_error_state_free(&error->ref);
}
void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	error = dev_priv->first_error;
	dev_priv->first_error = NULL;
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}
#else
#define i915_capture_error_state(x)
#endif
static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 eir = I915_READ(EIR);
	int pipe;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTDONE: 0x%08x\n",
			       I915_READ(INSTDONE_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  INSTDONE: 0x%08x\n", I915_READ(INSTDONE));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTDONE: 0x%08x\n",
			       I915_READ(INSTDONE_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}
/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;

	i915_capture_error_state(dev);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		INIT_COMPLETION(dev_priv->error_completion);
		atomic_set(&dev_priv->mm.wedged, 1);

		/*
		 * Wakeup waiting processes so they don't hang
		 */
		for_each_ring(ring, dev_priv, i)
			wake_up_all(&ring->irq_queue);
	}

	queue_work(dev_priv->wq, &dev_priv->error_work);
}
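
/* Pageflip stall heuristic: if the display base register already points
 * at the new framebuffer when a vblank arrives but the flip-pending
 * interrupt never fired, assume the interrupt was missed and complete
 * the flip by hand.
 */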
static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	struct intel_unpin_work *work;
	unsigned long flags;
	bool stall_detected;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	if (work == NULL || work->pending || !work->enable_stall_check) {
		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
	obj = work->pending_flip_obj;
	if (INTEL_INFO(dev)->gen >= 4) {
		int dspsurf = DSPSURF(intel_crtc->plane);
		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
					obj->gtt_offset;
	} else {
		int dspaddr = DSPADDR(intel_crtc->plane);
		stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
							crtc->y * crtc->fb->pitches[0] +
							crtc->x * crtc->fb->bits_per_pixel/8);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (stall_detected) {
		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
		intel_prepare_page_flip(dev, intel_crtc->plane);
	}
}
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_ENABLE);

	/* maintain vblank delivery even in deep C-states */
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
				    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv,
				    DE_PIPEA_VBLANK_IVB << (5 * pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 imr;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0)
		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));

	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_ENABLE |
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
				     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv,
				     DE_PIPEA_VBLANK_IVB << (pipe * 5));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 imr;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0)
		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static u32
ring_last_seqno(struct intel_ring_buffer *ring)
{
	return list_entry(ring->request_list.prev,
			  struct drm_i915_gem_request, list)->seqno;
}
static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
{
	if (list_empty(&ring->request_list) ||
	    i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
		/* Issue a wake-up to catch stuck h/w. */
		if (waitqueue_active(&ring->irq_queue)) {
			DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
				  ring->name);
			wake_up_all(&ring->irq_queue);
			*err = true;
		}
		return true;
	}
	return false;
}
static bool kick_ring(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp = I915_READ_CTL(ring);
	if (tmp & RING_WAIT) {
		DRM_ERROR("Kicking stuck wait on %s\n",
			  ring->name);
		I915_WRITE_CTL(ring, tmp);
		return true;
	}
	return false;
}
static bool i915_hangcheck_hung(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (dev_priv->hangcheck_count++ > 1) {
		bool hung = true;

		DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
		i915_handle_error(dev, true);

		if (!IS_GEN2(dev)) {
			struct intel_ring_buffer *ring;
			int i;

			/* Is the chip hanging on a WAIT_FOR_EVENT?
			 * If so we can simply poke the RB_WAIT bit
			 * and break the hang. This should work on
			 * all but the second generation chipsets.
			 */
			for_each_ring(ring, dev_priv, i)
				hung &= !kick_ring(ring);
		}

		return hung;
	}

	return false;
}
/**
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. The first time this is called we simply record
 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
 * again, we assume the chip is wedged and try to fix it.
 */
void i915_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t acthd[I915_NUM_RINGS], instdone, instdone1;
	struct intel_ring_buffer *ring;
	bool err = false, idle;
	int i;

	if (!i915_enable_hangcheck)
		return;

	memset(acthd, 0, sizeof(acthd));
	idle = true;
	for_each_ring(ring, dev_priv, i) {
		idle &= i915_hangcheck_ring_idle(ring, &err);
		acthd[i] = intel_ring_get_active_head(ring);
	}

	/* If all work is done then ACTHD clearly hasn't advanced. */
	if (idle) {
		if (err) {
			if (i915_hangcheck_hung(dev))
				return;

			goto repeat;
		}

		dev_priv->hangcheck_count = 0;
		return;
	}

	if (INTEL_INFO(dev)->gen < 4) {
		instdone = I915_READ(INSTDONE);
		instdone1 = 0;
	} else {
		instdone = I915_READ(INSTDONE_I965);
		instdone1 = I915_READ(INSTDONE1);
	}

	if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 &&
	    dev_priv->last_instdone == instdone &&
	    dev_priv->last_instdone1 == instdone1) {
		if (i915_hangcheck_hung(dev))
			return;
	} else {
		dev_priv->hangcheck_count = 0;

		memcpy(dev_priv->last_acthd, acthd, sizeof(acthd));
		dev_priv->last_instdone = instdone;
		dev_priv->last_instdone1 = instdone1;
	}

repeat:
	/* Reset timer case chip hangs without another request being added */
	mod_timer(&dev_priv->hangcheck_timer,
		  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
}
/* drm_dma.h hooks
*/
static void ironlake_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(HWSTAM, 0xeffe);

	/* XXX hotplug from PCH */

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	POSTING_READ(DEIER);

	/* and GT */
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	/* south display irq */
	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	POSTING_READ(SDEIER);
}
static void valleyview_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	/* VLV magic */
	I915_WRITE(VLV_IMR, 0);
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

	/* and GT */
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	I915_WRITE(DPINVGTT, 0xff);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}
/*
 * Enable digital hotplug on the PCH, and configure the DP short pulse
 * duration to 2ms (which is the minimum in the Display Port spec)
 *
 * This register is the same on all known PCH chips.
 */
static void ironlake_enable_pch_hotplug(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 hotplug;

	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
static int ironlake_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable kind of interrupts always enabled */
	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
	u32 render_irqs;
	u32 hotplug_mask;

	dev_priv->irq_mask = ~display_mask;

	/* should always can generate irq */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
	POSTING_READ(DEIER);

	dev_priv->gt_irq_mask = ~0;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	if (IS_GEN6(dev))
		render_irqs =
			GT_USER_INTERRUPT |
			GEN6_BSD_USER_INTERRUPT |
			GEN6_BLITTER_USER_INTERRUPT;
	else
		render_irqs =
			GT_USER_INTERRUPT |
			GT_PIPE_NOTIFY |
			GT_BSD_USER_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	if (HAS_PCH_CPT(dev)) {
		hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
				SDE_PORTB_HOTPLUG_CPT |
				SDE_PORTC_HOTPLUG_CPT |
				SDE_PORTD_HOTPLUG_CPT);
	} else {
		hotplug_mask = (SDE_CRT_HOTPLUG |
				SDE_PORTB_HOTPLUG |
				SDE_PORTC_HOTPLUG |
				SDE_PORTD_HOTPLUG |
				SDE_AUX_MASK);
	}

	dev_priv->pch_irq_mask = ~hotplug_mask;

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
	I915_WRITE(SDEIER, hotplug_mask);
	POSTING_READ(SDEIER);

	ironlake_enable_pch_hotplug(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Clear & enable PCU event interrupts */
		I915_WRITE(DEIIR, DE_PCU_EVENT);
		I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
	}

	return 0;
}
static int ivybridge_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable kind of interrupts always enabled */
	u32 display_mask =
		DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
		DE_PLANEC_FLIP_DONE_IVB |
		DE_PLANEB_FLIP_DONE_IVB |
		DE_PLANEA_FLIP_DONE_IVB;
	u32 render_irqs;
	u32 hotplug_mask;

	dev_priv->irq_mask = ~display_mask;

	/* should always can generate irq */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER,
		   display_mask |
		   DE_PIPEC_VBLANK_IVB |
		   DE_PIPEB_VBLANK_IVB |
		   DE_PIPEA_VBLANK_IVB);
	POSTING_READ(DEIER);

	dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
		GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
			SDE_PORTB_HOTPLUG_CPT |
			SDE_PORTC_HOTPLUG_CPT |
			SDE_PORTD_HOTPLUG_CPT);
	dev_priv->pch_irq_mask = ~hotplug_mask;

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
	I915_WRITE(SDEIER, hotplug_mask);
	POSTING_READ(SDEIER);

	ironlake_enable_pch_hotplug(dev);

	return 0;
}
static int valleyview_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
	u16 msid;

	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
	enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	/*
	 * Leave vblank interrupts masked initially.  enable/disable will
	 * toggle them based on usage.
	 */
	dev_priv->irq_mask = (~enable_mask) |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	/* Hack for broken MSIs on VLV */
	pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
	pci_read_config_word(dev->pdev, 0x98, &msid);
	msid &= 0xff; /* mask out delivery bits */
	msid |= (1<<14);
	pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);

	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, enable_mask);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(PIPESTAT(0), 0xffff);
	I915_WRITE(PIPESTAT(1), 0xffff);
	POSTING_READ(VLV_IER);

	i915_enable_pipestat(dev_priv, 0, pipestat_enable);
	i915_enable_pipestat(dev_priv, 1, pipestat_enable);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);

	dev_priv->gt_irq_mask = ~0;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	I915_WRITE(GTIER, GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT |
		   GT_GEN6_BLT_CS_ERROR_INTERRUPT |
		   GT_GEN6_BLT_USER_INTERRUPT |
		   GT_GEN6_BSD_USER_INTERRUPT |
		   GT_GEN6_BSD_CS_ERROR_INTERRUPT |
		   GT_GEN7_L3_PARITY_ERROR_INTERRUPT |
		   GT_PIPE_NOTIFY |
		   GT_RENDER_CS_ERROR_INTERRUPT |
		   GT_SYNC_STATUS |
		   GT_USER_INTERRUPT);
	POSTING_READ(GTIER);

	/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
#if 0 /* FIXME: check register definitions; some have moved */
	/* Note HDMI and DP share bits */
	if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMID_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
		hotplug_en |= SDVOC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
		hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
		hotplug_en |= CRT_HOTPLUG_INT_EN;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
	}
#endif

	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);

	return 0;
}
static void valleyview_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}
static void ironlake_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!dev_priv)
		return;

	I915_WRITE(HWSTAM, 0xffffffff);

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	I915_WRITE(DEIIR, I915_READ(DEIIR));

	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	I915_WRITE(GTIIR, I915_READ(GTIIR));

	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
}
static void i8xx_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}
static int i8xx_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	return 0;
}
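
/* In the handler below, flip-pending bits are deliberately left unacked
 * in IIR (the write uses iir & ~flip_mask) until the matching vblank has
 * been handled; only then is the bit dropped from flip_mask so the next
 * pass acks it.
 */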
static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	unsigned long irqflags;
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ16(IIR);
	if (iir == 0)
		return IRQ_NONE;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		i915_update_dri1_breadcrumb(dev);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    drm_handle_vblank(dev, 0)) {
			if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
				intel_prepare_page_flip(dev, 0);
				intel_finish_page_flip(dev, 0);
				flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
			}
		}

		if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    drm_handle_vblank(dev, 1)) {
			if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
				intel_prepare_page_flip(dev, 1);
				intel_finish_page_flip(dev, 1);
				flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
			}
		}

		iir = new_iir;
	}

	return IRQ_HANDLED;
}
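/*
 * PIPESTAT keeps its interrupt-enable bits in the high half of the
 * register and its write-1-to-clear status bits in the low half, so the
 * teardown below first writes 0 to drop the enables and then writes back
 * the value it reads to ack any latched status.
 */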
static void i8xx_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}
static void i915_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
static int i915_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	if (I915_HAS_HOTPLUG(dev)) {
		u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);

		if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMID_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
			hotplug_en |= CRT_HOTPLUG_INT_EN;
			hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
		}

		/* Ignore TV since it's buggy */

		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}

	intel_opregion_enable_asle(dev);

	return 0;
}
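/*
 * Handler flow shared by the gen3 and gen4 paths below: snapshot and clear
 * the PIPESTAT registers under irq_lock, consume the hotplug port status
 * before acking IIR (or port events would be missed), then dispatch ring,
 * page-flip, vblank and ASLE work from the snapshots.
 */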
static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	u32 flip[2] = {
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT,
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT
	};
	int pipe, ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		/* Consume port.  Then clear IIR or we'll miss events */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			POSTING_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(pipe) {
			int plane = pipe;
			if (IS_MOBILE(dev))
				plane = !plane;
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    drm_handle_vblank(dev, pipe)) {
				if (iir & flip[plane]) {
					intel_prepare_page_flip(dev, plane);
					intel_finish_page_flip(dev, pipe);
					flip_mask &= ~flip[plane];
				}
			}

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	i915_update_dri1_breadcrumb(dev);

	return ret;
}
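#if 0
	/*
	 * Illustrative sketch only (not driver code): the MSI-safe ack loop
	 * that i915_irq_handler() above implements. read_iir(), write_iir()
	 * and handle_events() are hypothetical stand-ins for the register
	 * accessors and the per-bit dispatch.
	 */
	u32 iir = read_iir();
	while (iir) {
		u32 new_iir;

		write_iir(iir);		/* ack the bits we snapshotted */
		new_iir = read_iir();	/* re-read: MSI only fires on a
					 * 0 -> nonzero transition, so a bit
					 * that landed meanwhile must be
					 * picked up here, not waited for */
		handle_events(iir);
		iir = new_iir;
	}
#endif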
static void i915_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}
static void i965_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
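/*
 * EMR is inverted relative to IER: a bit *set* in EMR suppresses that
 * error report. The postinstall below therefore clears only the error
 * sources it wants and, per the comment in it, leaves the reserved
 * instruction-error bit masked.
 */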
static int i965_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 hotplug_en;
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	/* Note HDMI and DP share hotplug bits */
	hotplug_en = 0;
	if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMID_HOTPLUG_INT_EN;
	if (IS_G4X(dev)) {
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	} else {
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	}
	if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
		hotplug_en |= CRT_HOTPLUG_INT_EN;

		/* Programming the CRT detection parameters tends
		   to generate a spurious hotplug event about three
		   seconds later.  So just do it once.
		*/
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
	}

	/* Ignore TV since it's buggy */

	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);

	intel_opregion_enable_asle(dev);

	return 0;
}
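/*
 * Unlike the gen3 handler, the gen4 path below acks the whole IIR in one
 * write and relies on i915_pageflip_stall_check() at vblank time rather
 * than the flip_mask bookkeeping; note it also keys off the
 * PIPE_START_VBLANK status bit instead of PIPE_VBLANK.
 */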
static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE, pipe;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);

	for (;;) {
		bool blc_event = false;

		irq_received = iir != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
			intel_prepare_page_flip(dev, 0);

		if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
			intel_prepare_page_flip(dev, 1);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    drm_handle_vblank(dev, pipe)) {
				i915_pageflip_stall_check(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}
static void i965_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}
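/*
 * intel_irq_init() wires up one of the handler families above at load
 * time: VLV, IVB/HSW and PCH-split parts get dedicated hooks, while
 * everything older dispatches on generation (gen2 -> i8xx_*, gen3 ->
 * i915_*, gen4+ -> i965_*).
 */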
void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->error_work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work);

	dev->driver->get_vblank_counter = i915_get_vblank_counter;
	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	else
		dev->driver->get_vblank_timestamp = NULL;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
	} else if (IS_IVYBRIDGE(dev)) {
		/* Share pre & uninstall handlers with ILK/SNB */
		dev->driver->irq_handler = ivybridge_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ivybridge_enable_vblank;
		dev->driver->disable_vblank = ivybridge_disable_vblank;
	} else if (IS_HASWELL(dev)) {
		/* Share interrupts handling with IVB */
		dev->driver->irq_handler = ivybridge_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ivybridge_enable_vblank;
		dev->driver->disable_vblank = ivybridge_disable_vblank;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}