/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"

#define USER_INT_FLAG (1<<1)
#define VSYNC_PIPEB_FLAG (1<<5)
#define VSYNC_PIPEA_FLAG (1<<7)

#define MAX_NOPID ((u32)~0)

/**
 * Emit blits for scheduled buffer swaps.
 *
 * This function will be called with the HW lock held.
 */
static void i915_vblank_tasklet(drm_device_t *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;
        struct list_head *list, *tmp, hits, *hit;
        int nhits, nrects, slice[2], upper[2], lower[2], i;
        unsigned counter[2] = { atomic_read(&dev->vbl_received),
                                atomic_read(&dev->vbl_received2) };
        drm_drawable_info_t *drw;
        drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
        u32 cpp = dev_priv->cpp;
        u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD |
                                XY_SRC_COPY_BLT_WRITE_ALPHA |
                                XY_SRC_COPY_BLT_WRITE_RGB)
                             : XY_SRC_COPY_BLT_CMD;
        u32 pitchropcpp = (sarea_priv->pitch * cpp) | (0xcc << 16) |
                          (cpp << 23) | (1 << 24);
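        /*
         * cmd selects the 2D source-copy blit; for 32 bpp the write-alpha and
         * write-RGB bits are set as well.  pitchropcpp appears to pack the
         * destination pitch in bytes (low 16 bits), the 0xcc copy ROP and the
         * colour depth field into the blit's second dword; masked down to the
         * low 16 bits it is reused below as the source pitch.
         */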
        RING_LOCALS;

        INIT_LIST_HEAD(&hits);

        nhits = nrects = 0;

        spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);

        /* Find buffer swaps scheduled for this vertical blank */
        list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
                drm_i915_vbl_swap_t *vbl_swap =
                        list_entry(list, drm_i915_vbl_swap_t, head);

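                /*
                 * A swap is due once the pipe's vblank counter has reached or
                 * passed its target sequence; the unsigned subtraction below
                 * treats differences larger than 2^23 as "still in the
                 * future", so it presumably remains correct across counter
                 * wrap-around.
                 */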
                if ((counter[vbl_swap->pipe] - vbl_swap->sequence) > (1<<23))
                        continue;

                list_del(list);
                dev_priv->swaps_pending--;

                spin_unlock(&dev_priv->swaps_lock);
                spin_lock(&dev->drw_lock);

                drw = drm_get_drawable_info(dev, vbl_swap->drw_id);

                if (!drw) {
                        spin_unlock(&dev->drw_lock);
                        drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
                        spin_lock(&dev_priv->swaps_lock);
                        continue;
                }

                /* Keep the hits list sorted by the top edge of each
                 * drawable's first cliprect. */
                list_for_each(hit, &hits) {
                        drm_i915_vbl_swap_t *swap_cmp =
                                list_entry(hit, drm_i915_vbl_swap_t, head);
                        drm_drawable_info_t *drw_cmp =
                                drm_get_drawable_info(dev, swap_cmp->drw_id);

                        if (drw_cmp &&
                            drw_cmp->rects[0].y1 > drw->rects[0].y1) {
                                list_add_tail(list, hit);
                                break;
                        }
                }

                spin_unlock(&dev->drw_lock);

                /* List of hits was empty, or we reached the end of it */
                if (hit == &hits)
                        list_add_tail(list, hits.prev);

                nhits++;

                spin_lock(&dev_priv->swaps_lock);
        }

        if (nhits == 0) {
                spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
                return;
        }

        spin_unlock(&dev_priv->swaps_lock);

        i915_kernel_lost_context(dev);

        BEGIN_LP_RING(6);

        OUT_RING(GFX_OP_DRAWRECT_INFO);
        OUT_RING(0);
        OUT_RING(0);
        OUT_RING(sarea_priv->width | sarea_priv->height << 16);
        OUT_RING(sarea_priv->width | sarea_priv->height << 16);
        OUT_RING(0);

        ADVANCE_LP_RING();

        sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT;

        upper[0] = upper[1] = 0;
        slice[0] = max(sarea_priv->pipeA_h / nhits, 1);
        slice[1] = max(sarea_priv->pipeB_h / nhits, 1);
        lower[0] = sarea_priv->pipeA_y + slice[0];
        lower[1] = sarea_priv->pipeB_y + slice[1];

        spin_lock(&dev->drw_lock);

        /* Emit blits for buffer swaps, partitioning both outputs into as many
         * slices as there are buffer swaps scheduled in order to avoid tearing
         * (based on the assumption that a single buffer swap would always
         * complete before scanout starts).
         */
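        /*
         * A worked example: with nhits == 2 and pipeA_h == 600, slice[0] is
         * 300, so the first pass clips every blit to y < pipeA_y + 300 and
         * the second (final) pass covers the remainder; the last pass always
         * extends to sarea_priv->height so rounding in the division cannot
         * leave rows unswapped.
         */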
        for (i = 0; i++ < nhits;
             upper[0] = lower[0], lower[0] += slice[0],
             upper[1] = lower[1], lower[1] += slice[1]) {
                if (i == nhits)
                        lower[0] = lower[1] = sarea_priv->height;

                list_for_each(hit, &hits) {
                        drm_i915_vbl_swap_t *swap_hit =
                                list_entry(hit, drm_i915_vbl_swap_t, head);
                        drm_clip_rect_t *rect;
                        int num_rects, pipe;
                        unsigned short top, bottom;

                        drw = drm_get_drawable_info(dev, swap_hit->drw_id);
                        if (!drw)
                                continue;

                        rect = drw->rects;
                        pipe = swap_hit->pipe;
                        top = upper[pipe];
                        bottom = lower[pipe];

                        for (num_rects = drw->num_rects; num_rects--; rect++) {
                                int y1 = max(rect->y1, top);
                                int y2 = min(rect->y2, bottom);
                                if (y1 >= y2)
                                        continue;

                                BEGIN_LP_RING(8);

                                /* dst: front buffer, src: back buffer */
                                OUT_RING(cmd);
                                OUT_RING(pitchropcpp);
                                OUT_RING((y1 << 16) | rect->x1);
                                OUT_RING((y2 << 16) | rect->x2);
                                OUT_RING(sarea_priv->front_offset);
                                OUT_RING((y1 << 16) | rect->x1);
                                OUT_RING(pitchropcpp & 0xffff);
                                OUT_RING(sarea_priv->back_offset);

                                ADVANCE_LP_RING();
                        }
                }
        }

        spin_unlock_irqrestore(&dev->drw_lock, irqflags);

        list_for_each_safe(hit, tmp, &hits) {
                drm_i915_vbl_swap_t *swap_hit =
                        list_entry(hit, drm_i915_vbl_swap_t, head);

                list_del(hit);

                drm_free(swap_hit, sizeof(*swap_hit), DRM_MEM_DRIVER);
        }
}

irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
        drm_device_t *dev = (drm_device_t *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u16 temp;

        temp = I915_READ16(I915REG_INT_IDENTITY_R);

        temp &= (USER_INT_FLAG | VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG);

        DRM_DEBUG("%s flag=%08x\n", __FUNCTION__, temp);

        if (temp == 0)
                return IRQ_NONE;

        I915_WRITE16(I915REG_INT_IDENTITY_R, temp);

        dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

        if (temp & USER_INT_FLAG)
                DRM_WAKEUP(&dev_priv->irq_queue);

        if (temp & (VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG)) {
                int vblank_pipe = dev_priv->vblank_pipe;

                /*
                 * If both pipes are monitored, pipe A counts in vbl_received
                 * and pipe B in vbl_received2; otherwise the single monitored
                 * pipe counts in vbl_received.
                 */
                if ((vblank_pipe &
                     (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B))
                    == (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) {
                        if (temp & VSYNC_PIPEA_FLAG)
                                atomic_inc(&dev->vbl_received);
                        if (temp & VSYNC_PIPEB_FLAG)
                                atomic_inc(&dev->vbl_received2);
                } else if (((temp & VSYNC_PIPEA_FLAG) &&
                            (vblank_pipe & DRM_I915_VBLANK_PIPE_A)) ||
                           ((temp & VSYNC_PIPEB_FLAG) &&
                            (vblank_pipe & DRM_I915_VBLANK_PIPE_B)))
                        atomic_inc(&dev->vbl_received);

                DRM_WAKEUP(&dev->vbl_queue);
                drm_vbl_send_signals(dev);

                if (dev_priv->swaps_pending > 0)
                        drm_locked_tasklet(dev, i915_vblank_tasklet);
        }

        return IRQ_HANDLED;
}

static int i915_emit_irq(drm_device_t * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        RING_LOCALS;

        i915_kernel_lost_context(dev);

        DRM_DEBUG("%s\n", __FUNCTION__);

        dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;

        if (dev_priv->counter > 0x7FFFFFFFUL)
                dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;

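        /*
         * The new sequence number is written into the hardware status page
         * (the breadcrumb read back by READ_BREADCRUMB()) and is followed by
         * a user interrupt, so i915_wait_irq() can sleep until the breadcrumb
         * catches up with the value returned here.
         */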
        BEGIN_LP_RING(6);
        OUT_RING(CMD_STORE_DWORD_IDX);
        OUT_RING(20);   /* store at dword 5 of the hw status page (the breadcrumb) */
        OUT_RING(dev_priv->counter);
        OUT_RING(0);
        OUT_RING(0);
        OUT_RING(GFX_OP_USER_INTERRUPT);
        ADVANCE_LP_RING();

        return dev_priv->counter;
}

static int i915_wait_irq(drm_device_t * dev, int irq_nr)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int ret = 0;

        DRM_DEBUG("%s irq_nr=%d breadcrumb=%d\n", __FUNCTION__, irq_nr,
                  READ_BREADCRUMB(dev_priv));

        if (READ_BREADCRUMB(dev_priv) >= irq_nr)
                return 0;

        dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

        DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
                    READ_BREADCRUMB(dev_priv) >= irq_nr);

        if (ret == DRM_ERR(EBUSY)) {
                DRM_ERROR("%s: EBUSY -- rec: %d emitted: %d\n",
                          __FUNCTION__,
                          READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
        }

        dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
        return ret;
}

static int i915_driver_vblank_do_wait(drm_device_t *dev, unsigned int *sequence,
                                      atomic_t *counter)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned int cur_vblank;
        int ret = 0;

        if (!dev_priv) {
                DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
                return DRM_ERR(EINVAL);
        }

        DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
                    (((cur_vblank = atomic_read(counter))
                        - *sequence) <= (1<<23)));

        *sequence = cur_vblank;

        return ret;
}

int i915_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence)
{
        return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received);
}

int i915_driver_vblank_wait2(drm_device_t *dev, unsigned int *sequence)
{
        return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received2);
}

/* Needs the lock as it touches the ring.
 */
int i915_irq_emit(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_irq_emit_t emit;
        int result;

        LOCK_TEST_WITH_RETURN(dev, filp);

        if (!dev_priv) {
                DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
                return DRM_ERR(EINVAL);
        }

        DRM_COPY_FROM_USER_IOCTL(emit, (drm_i915_irq_emit_t __user *) data,
                                 sizeof(emit));

        result = i915_emit_irq(dev);

        if (DRM_COPY_TO_USER(emit.irq_seq, &result, sizeof(int))) {
                DRM_ERROR("copy_to_user\n");
                return DRM_ERR(EFAULT);
        }

        return 0;
}

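/*
 * A minimal sketch of the expected userspace pairing for these two ioctls,
 * assuming libdrm's drmCommandWrite() wrapper (declarations and error
 * handling elided; the real callers live in the DDX/Mesa):
 *
 *      int seq;
 *      drm_i915_irq_emit_t emit = { .irq_seq = &seq };
 *      drm_i915_irq_wait_t wait;
 *
 *      drmCommandWrite(fd, DRM_I915_IRQ_EMIT, &emit, sizeof(emit));
 *      wait.irq_seq = seq;
 *      drmCommandWrite(fd, DRM_I915_IRQ_WAIT, &wait, sizeof(wait));
 *
 * i.e. the emit ioctl hands back a sequence number through emit.irq_seq and
 * the wait ioctl blocks until the breadcrumb reaches that value.
 */
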
/* Doesn't need the hardware lock.
 */
int i915_irq_wait(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_irq_wait_t irqwait;

        if (!dev_priv) {
                DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
                return DRM_ERR(EINVAL);
        }

        DRM_COPY_FROM_USER_IOCTL(irqwait, (drm_i915_irq_wait_t __user *) data,
                                 sizeof(irqwait));

        return i915_wait_irq(dev, irqwait.irq_seq);
}

static void i915_enable_interrupt (drm_device_t *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u16 flag;

        flag = 0;
        if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)
                flag |= VSYNC_PIPEA_FLAG;
        if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
                flag |= VSYNC_PIPEB_FLAG;

        I915_WRITE16(I915REG_INT_ENABLE_R, USER_INT_FLAG | flag);
}

/* Set the vblank monitor pipe
 */
int i915_vblank_pipe_set(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_vblank_pipe_t pipe;

        if (!dev_priv) {
                DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
                return DRM_ERR(EINVAL);
        }

        DRM_COPY_FROM_USER_IOCTL(pipe, (drm_i915_vblank_pipe_t __user *) data,
                                 sizeof(pipe));

        if (pipe.pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) {
                DRM_ERROR("%s called with invalid pipe 0x%x\n",
                          __FUNCTION__, pipe.pipe);
                return DRM_ERR(EINVAL);
        }

        dev_priv->vblank_pipe = pipe.pipe;

        i915_enable_interrupt (dev);

        return 0;
}

int i915_vblank_pipe_get(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_vblank_pipe_t pipe;
        u16 flag;

        if (!dev_priv) {
                DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
                return DRM_ERR(EINVAL);
        }

        flag = I915_READ(I915REG_INT_ENABLE_R);
        pipe.pipe = 0;
        if (flag & VSYNC_PIPEA_FLAG)
                pipe.pipe |= DRM_I915_VBLANK_PIPE_A;
        if (flag & VSYNC_PIPEB_FLAG)
                pipe.pipe |= DRM_I915_VBLANK_PIPE_B;
        DRM_COPY_TO_USER_IOCTL((drm_i915_vblank_pipe_t __user *) data, pipe,
                               sizeof(pipe));
        return 0;
}

/**
 * Schedule buffer swap at given vertical blank.
 */
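/*
 * Userspace fills a drm_i915_vblank_swap_t with the drawable ID, the
 * pipe/relative/absolute flags in seqtype and a target sequence; on success
 * the (possibly adjusted) sequence the swap was actually queued for is
 * copied back, presumably so the client can then wait on that vblank.
 */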
int i915_vblank_swap(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_vblank_swap_t swap;
        drm_i915_vbl_swap_t *vbl_swap;
        unsigned int pipe, seqtype, curseq;
        unsigned long irqflags;
        struct list_head *list;

        if (!dev_priv) {
                DRM_ERROR("%s called with no initialization\n", __func__);
                return DRM_ERR(EINVAL);
        }

        if (dev_priv->sarea_priv->rotation) {
                DRM_DEBUG("Rotation not supported\n");
                return DRM_ERR(EINVAL);
        }

        DRM_COPY_FROM_USER_IOCTL(swap, (drm_i915_vblank_swap_t __user *) data,
                                 sizeof(swap));

        if (swap.seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE |
                             _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)) {
                DRM_ERROR("Invalid sequence type 0x%x\n", swap.seqtype);
                return DRM_ERR(EINVAL);
        }

        pipe = (swap.seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;

        seqtype = swap.seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);

        if (!(dev_priv->vblank_pipe & (1 << pipe))) {
                DRM_ERROR("Invalid pipe %d\n", pipe);
                return DRM_ERR(EINVAL);
        }

        spin_lock_irqsave(&dev->drw_lock, irqflags);

        if (!drm_get_drawable_info(dev, swap.drawable)) {
                spin_unlock_irqrestore(&dev->drw_lock, irqflags);
                DRM_DEBUG("Invalid drawable ID %d\n", swap.drawable);
                return DRM_ERR(EINVAL);
        }

        spin_unlock_irqrestore(&dev->drw_lock, irqflags);

        curseq = atomic_read(pipe ? &dev->vbl_received2 : &dev->vbl_received);

        if (seqtype == _DRM_VBLANK_RELATIVE)
                swap.sequence += curseq;

        /* The target vblank has already been reached or passed */
        if ((curseq - swap.sequence) <= (1<<23)) {
                if (swap.seqtype & _DRM_VBLANK_NEXTONMISS) {
                        swap.sequence = curseq + 1;
                } else {
                        DRM_DEBUG("Missed target sequence\n");
                        return DRM_ERR(EINVAL);
                }
        }

        spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);

        list_for_each(list, &dev_priv->vbl_swaps.head) {
                vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head);

                if (vbl_swap->drw_id == swap.drawable &&
                    vbl_swap->pipe == pipe &&
                    vbl_swap->sequence == swap.sequence) {
                        spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
                        DRM_DEBUG("Already scheduled\n");
                        return 0;
                }
        }

        spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);

        if (dev_priv->swaps_pending >= 100) {
                DRM_DEBUG("Too many swaps queued\n");
                return DRM_ERR(EBUSY);
        }

        vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);

        if (!vbl_swap) {
                DRM_ERROR("Failed to allocate memory to queue swap\n");
                return DRM_ERR(ENOMEM);
        }

        vbl_swap->drw_id = swap.drawable;
        vbl_swap->pipe = pipe;
        vbl_swap->sequence = swap.sequence;

        spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);

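        /*
         * Casting vbl_swap to struct list_head * presumably relies on head
         * being the first member of drm_i915_vbl_swap_t; &vbl_swap->head
         * would express the same thing without that assumption.
         */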
        list_add_tail((struct list_head *)vbl_swap, &dev_priv->vbl_swaps.head);
        dev_priv->swaps_pending++;

        spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);

        DRM_COPY_TO_USER_IOCTL((drm_i915_vblank_swap_t __user *) data, swap,
                               sizeof(swap));

        return 0;
}

void i915_driver_irq_preinstall(drm_device_t * dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

        I915_WRITE16(I915REG_HWSTAM, 0xfffe);
        I915_WRITE16(I915REG_INT_MASK_R, 0x0);
        I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
}

void i915_driver_irq_postinstall(drm_device_t * dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

        spin_lock_init(&dev_priv->swaps_lock);
        INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
        dev_priv->swaps_pending = 0;

        if (!dev_priv->vblank_pipe)
                dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
        i915_enable_interrupt(dev);
        DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
}

void i915_driver_irq_uninstall(drm_device_t * dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u16 temp;

        if (!dev_priv)
                return;

        I915_WRITE16(I915REG_HWSTAM, 0xffff);
        I915_WRITE16(I915REG_INT_MASK_R, 0xffff);
        I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);

        temp = I915_READ16(I915REG_INT_IDENTITY_R);
        I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
}