/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include <linux/pci.h>
#include <linux/vgaarb.h>
#include <linux/acpi.h>
#include <linux/pnp.h>
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <acpi/video.h>
#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])

#define BEGIN_LP_RING(n) \
	intel_ring_begin(LP_RING(dev_priv), (n))

#define OUT_RING(x) \
	intel_ring_emit(LP_RING(dev_priv), x)

#define ADVANCE_LP_RING() \
	intel_ring_advance(LP_RING(dev_priv))
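/*
 * Illustrative usage sketch (an editorial comment, not from the original
 * file; assumes a dev_priv local in scope, as in every caller below):
 *
 *	if (BEGIN_LP_RING(2) == 0) {
 *		OUT_RING(MI_FLUSH);
 *		OUT_RING(MI_NOOP);
 *		ADVANCE_LP_RING();
 *	}
 *
 * BEGIN_LP_RING() reserves ring space and can fail, OUT_RING() writes a
 * single dword, and ADVANCE_LP_RING() publishes the new tail to the
 * hardware.
 */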
/**
 * Lock test for when it's just for synchronization of ring access.
 *
 * In that case, we don't need to do it when GEM is initialized as nobody else
 * has access to the ring.
 */
#define RING_LOCK_TEST_WITH_RETURN(dev, file) do {			\
	if (LP_RING(dev->dev_private)->obj == NULL)			\
		LOCK_TEST_WITH_RETURN(dev, file);			\
} while (0)
static inline u32
intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
{
	if (I915_NEED_GFX_HWS(dev_priv->dev))
		return ioread32(dev_priv->dri1.gfx_hws_cpu_addr + reg);
	else
		return intel_read_status_page(LP_RING(dev_priv), reg);
}
#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_BREADCRUMB_INDEX		0x21
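/*
 * Sketch of how the pieces fit together (editorial note, grounded in the
 * DRI1 paths below): the GPU writes a monotonically increasing counter
 * into dword I915_BREADCRUMB_INDEX (0x21) of the hardware status page via
 * MI_STORE_DWORD_INDEX (see i915_emit_breadcrumb()), and the CPU reads it
 * back with READ_BREADCRUMB(), e.g.
 *
 *	seqno = i915_emit_irq(dev);
 *	...
 *	done = READ_BREADCRUMB(dev_priv) >= seqno;
 *
 * which is exactly the completion test used in i915_wait_irq().
 */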
void i915_update_dri1_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;

	/*
	 * The dri breadcrumb update races against the drm master disappearing.
	 * Instead of trying to fix this (this is by far not the only ums issue)
	 * just don't do the update in kms mode.
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	if (dev->primary->master) {
		master_priv = dev->primary->master->driver_priv;
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch =
				READ_BREADCRUMB(dev_priv);
	}
}
static void i915_write_hws_pga(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 addr;

	addr = dev_priv->status_page_dmah->busaddr;
	if (INTEL_INFO(dev)->gen >= 4)
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
	I915_WRITE(HWS_PGA, addr);
}
/**
 * Frees the hardware status page, whether it's a physical address or a virtual
 * address set up by the X Server.
 */
static void i915_free_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	if (dev_priv->status_page_dmah) {
		drm_pci_free(dev, dev_priv->status_page_dmah);
		dev_priv->status_page_dmah = NULL;
	}

	if (ring->status_page.gfx_addr) {
		ring->status_page.gfx_addr = 0;
		iounmap(dev_priv->dri1.gfx_hws_cpu_addr);
	}

	/* Need to rewrite hardware status page */
	I915_WRITE(HWS_PGA, 0x1ffff000);
}
void i915_kernel_lost_context(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	/*
	 * We should never lose context on the ring with modesetting
	 * as we don't expose it to userspace
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
	ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE);
	if (ring->space < 0)
		ring->space += ring->size;

	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
	if (ring->head == ring->tail && master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}
static int i915_dma_cleanup(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	mutex_lock(&dev->struct_mutex);
	for (i = 0; i < I915_NUM_RINGS; i++)
		intel_cleanup_ring_buffer(&dev_priv->ring[i]);
	mutex_unlock(&dev->struct_mutex);

	/* Clear the HWS virtual address at teardown */
	if (I915_NEED_GFX_HWS(dev))
		i915_free_hws(dev);

	return 0;
}
static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret;

	master_priv->sarea = drm_getsarea(dev);
	if (master_priv->sarea) {
		master_priv->sarea_priv = (drm_i915_sarea_t *)
			((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
	} else {
		DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
	}

	if (init->ring_size != 0) {
		if (LP_RING(dev_priv)->obj != NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("Client tried to initialize ringbuffer in "
				  "GEM mode\n");
			return -EINVAL;
		}

		ret = intel_render_ring_init_dri(dev,
						 init->ring_start,
						 init->ring_size);
		if (ret) {
			i915_dma_cleanup(dev);
			return ret;
		}
	}

	dev_priv->dri1.cpp = init->cpp;
	dev_priv->dri1.back_offset = init->back_offset;
	dev_priv->dri1.front_offset = init->front_offset;
	dev_priv->dri1.current_page = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->pf_current_page = 0;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->dri1.allow_batchbuffer = 1;

	return 0;
}
static int i915_dma_resume(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	DRM_DEBUG_DRIVER("%s\n", __func__);

	if (ring->virtual_start == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	/* Program Hardware Status Page */
	if (!ring->status_page.page_addr) {
		DRM_ERROR("Can not find hardware status page\n");
		return -EINVAL;
	}
	DRM_DEBUG_DRIVER("hw status page @ %p\n",
			 ring->status_page.page_addr);
	if (ring->status_page.gfx_addr != 0)
		intel_ring_setup_status_page(ring);
	else
		i915_write_hws_pga(dev);

	DRM_DEBUG_DRIVER("Enabled hardware status page\n");

	return 0;
}
static int i915_dma_init(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_init_t *init = data;
	int retcode = 0;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	switch (init->func) {
	case I915_INIT_DMA:
		retcode = i915_initialize(dev, init);
		break;
	case I915_CLEANUP_DMA:
		retcode = i915_dma_cleanup(dev);
		break;
	case I915_RESUME_DMA:
		retcode = i915_dma_resume(dev);
		break;
	default:
		retcode = -EINVAL;
		break;
	}

	return retcode;
}
/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
static int validate_cmd(int cmd)
{
	switch (((cmd >> 29) & 0x7)) {
	case 0x0:
		switch ((cmd >> 23) & 0x3f) {
		case 0x0:
			return 1;	/* MI_NOOP */
		case 0x04:
			return 1;	/* MI_FLUSH */
		default:
			return 0;	/* disallow everything else */
		}
		break;
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3:
		if (((cmd >> 24) & 0x1f) <= 0x18)
			return 1;

		switch ((cmd >> 24) & 0x1f) {
		case 0x1c:
			return 1;
		case 0x1d:
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			if (cmd & (1 << 23))
				return (cmd & 0xffff) + 1;
			else
				return 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			else if (cmd & (1 << 17))	/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				else
					return (((cmd & 0xffff) + 1) / 2) + 1;
			else
				return 2;	/* indirect sequential */
		default:
			return 0;
		}
	default:
		return 0;
	}

	return 0;
}
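/*
 * Worked example (illustrative values, an editorial addition): for
 * cmd = 0x54000006, (cmd >> 29) & 0x7 == 0x2, i.e. a 2d command, so the
 * length is (cmd & 0xff) + 2 == 8 dwords and the next instruction to
 * validate starts 8 dwords later. Any return of 0 makes i915_emit_cmds()
 * below reject the entire buffer.
 */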
static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, ret;

	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
		return -EINVAL;

	for (i = 0; i < dwords;) {
		int sz = validate_cmd(buffer[i]);

		if (sz == 0 || i + sz > dwords)
			return -EINVAL;
		i += sz;
	}

	ret = BEGIN_LP_RING((dwords+1)&~1);
	if (ret)
		return ret;

	for (i = 0; i < dwords; i++)
		OUT_RING(buffer[i]);
	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}
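/*
 * Note (an inference from the code above, not stated in the original):
 * the (dwords+1)&~1 rounding reserves an even number of dwords so the
 * ring tail stays quadword-aligned; the odd trailing slot, if any, is
 * filled with a zero dword (the MI_NOOP encoding).
 */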
int
i915_emit_box(struct drm_device *dev,
	      struct drm_clip_rect *box,
	      int DR1, int DR4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
	    box->y2 <= 0 || box->x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box->x1, box->y1, box->x2, box->y2);
		return -EINVAL;
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		ret = BEGIN_LP_RING(4);
		if (ret)
			return ret;

		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
	} else {
		ret = BEGIN_LP_RING(6);
		if (ret)
			return ret;

		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
	}
	ADVANCE_LP_RING();

	return 0;
}
/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit. For now, do it in both places:
 */

static void i915_emit_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	dev_priv->dri1.counter++;
	if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
		dev_priv->dri1.counter = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->dri1.counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}
}
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
				   drm_i915_cmdbuffer_t *cmd,
				   struct drm_clip_rect *cliprects,
				   void *cmdbuf)
{
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, &cliprects[i],
					    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb(dev);
	return 0;
}
static int i915_dispatch_batchbuffer(struct drm_device * dev,
				     drm_i915_batchbuffer_t * batch,
				     struct drm_clip_rect *cliprects)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int nbox = batch->num_cliprects;
	int i, count, ret;

	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;
	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, &cliprects[i],
					    batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

		if (!IS_I830(dev) && !IS_845G(dev)) {
			ret = BEGIN_LP_RING(2);
			if (ret)
				return ret;

			if (INTEL_INFO(dev)->gen >= 4) {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
				OUT_RING(batch->start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			}
		} else {
			ret = BEGIN_LP_RING(4);
			if (ret)
				return ret;

			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
		}
		ADVANCE_LP_RING();
	}

	if (IS_G4X(dev) || IS_GEN5(dev)) {
		if (BEGIN_LP_RING(2) == 0) {
			OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
			OUT_RING(MI_NOOP);
			ADVANCE_LP_RING();
		}
	}

	i915_emit_breadcrumb(dev);
	return 0;
}
static int i915_dispatch_flip(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv =
		dev->primary->master->driver_priv;
	int ret;

	if (!master_priv->sarea_priv)
		return -EINVAL;

	DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
			 __func__,
			 dev_priv->dri1.current_page,
			 master_priv->sarea_priv->pf_current_page);

	i915_kernel_lost_context(dev);

	ret = BEGIN_LP_RING(10);
	if (ret)
		return ret;

	OUT_RING(MI_FLUSH | MI_READ_FLUSH);
	OUT_RING(0);

	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
	if (dev_priv->dri1.current_page == 0) {
		OUT_RING(dev_priv->dri1.back_offset);
		dev_priv->dri1.current_page = 1;
	} else {
		OUT_RING(dev_priv->dri1.front_offset);
		dev_priv->dri1.current_page = 0;
	}
	OUT_RING(0);

	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);

	ADVANCE_LP_RING();

	master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->dri1.counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	master_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page;
	return 0;
}
static int i915_quiescent(struct drm_device *dev)
{
	i915_kernel_lost_context(dev);
	return intel_ring_idle(LP_RING(dev->dev_private));
}
static int i915_flush_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	ret = i915_quiescent(dev);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
static int i915_batchbuffer(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_batchbuffer_t *batch = data;
	int ret;
	struct drm_clip_rect *cliprects = NULL;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv->dri1.allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return -EINVAL;
	}

	DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
			 batch->start, batch->used, batch->num_cliprects);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (batch->num_cliprects < 0)
		return -EINVAL;

	if (batch->num_cliprects) {
		cliprects = kcalloc(batch->num_cliprects,
				    sizeof(struct drm_clip_rect),
				    GFP_KERNEL);
		if (cliprects == NULL)
			return -ENOMEM;

		ret = copy_from_user(cliprects, batch->cliprects,
				     batch->num_cliprects *
				     sizeof(struct drm_clip_rect));
		if (ret != 0) {
			ret = -EFAULT;
			goto fail_free;
		}
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
	mutex_unlock(&dev->struct_mutex);

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_free:
	kfree(cliprects);

	return ret;
}
static int i915_cmdbuffer(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_cmdbuffer_t *cmdbuf = data;
	struct drm_clip_rect *cliprects = NULL;
	void *batch_data;
	int ret;

	DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
			 cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (cmdbuf->num_cliprects < 0)
		return -EINVAL;

	batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
	if (batch_data == NULL)
		return -ENOMEM;

	ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
	if (ret != 0) {
		ret = -EFAULT;
		goto fail_batch_free;
	}

	if (cmdbuf->num_cliprects) {
		cliprects = kcalloc(cmdbuf->num_cliprects,
				    sizeof(struct drm_clip_rect), GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto fail_batch_free;
		}

		ret = copy_from_user(cliprects, cmdbuf->cliprects,
				     cmdbuf->num_cliprects *
				     sizeof(struct drm_clip_rect));
		if (ret != 0) {
			ret = -EFAULT;
			goto fail_clip_free;
		}
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
	mutex_unlock(&dev->struct_mutex);
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
		goto fail_clip_free;
	}

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_clip_free:
	kfree(cliprects);
fail_batch_free:
	kfree(batch_data);

	return ret;
}
static int i915_emit_irq(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	i915_kernel_lost_context(dev);

	DRM_DEBUG_DRIVER("\n");

	dev_priv->dri1.counter++;
	if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
		dev_priv->dri1.counter = 1;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->dri1.counter);
		OUT_RING(MI_USER_INTERRUPT);
		ADVANCE_LP_RING();
	}

	return dev_priv->dri1.counter;
}
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret = 0;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
			 READ_BREADCRUMB(dev_priv));

	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
		return 0;
	}

	if (master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

	if (ring->irq_get(ring)) {
		DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
			    READ_BREADCRUMB(dev_priv) >= irq_nr);
		ring->irq_put(ring);
	} else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
		ret = -EBUSY;

	if (ret == -EBUSY) {
		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
			  READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter);
	}

	return ret;
}
/* Needs the lock as it touches the ring.
 */
static int i915_irq_emit(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_emit_t *emit = data;
	int result;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	result = i915_emit_irq(dev);
	mutex_unlock(&dev->struct_mutex);

	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}
/* Doesn't need the hardware lock.
 */
static int i915_irq_wait(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_wait_t *irqwait = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	return i915_wait_irq(dev, irqwait->irq_seq);
}
static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_vblank_pipe_t *pipe = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	return 0;
}
/**
 * Schedule buffer swap at given vertical blank.
 */
static int i915_vblank_swap(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	/* The delayed swap mechanism was fundamentally racy, and has been
	 * removed.  The model was that the client requested a delayed flip/swap
	 * from the kernel, then waited for vblank before continuing to perform
	 * rendering.  The problem was that the kernel might wake the client
	 * up before it dispatched the vblank swap (since the lock has to be
	 * held while touching the ringbuffer), in which case the client would
	 * clear and start the next frame before the swap occurred, and
	 * flicker would occur in addition to likely missing the vblank.
	 *
	 * In the absence of this ioctl, userland falls back to a correct path
	 * of waiting for a vblank, then dispatching the swap on its own.
	 * Context switching to userland and back is plenty fast enough for
	 * meeting the requirements of vblank swapping.
	 */
	return -EINVAL;
}
static int i915_flip_bufs(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	DRM_DEBUG_DRIVER("%s\n", __func__);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_flip(dev);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
static int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_getparam_t *param = data;
	int value;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
		value = dev->pdev->irq ? 1 : 0;
		break;
	case I915_PARAM_ALLOW_BATCHBUFFER:
		value = dev_priv->dri1.allow_batchbuffer ? 1 : 0;
		break;
	case I915_PARAM_LAST_DISPATCH:
		value = READ_BREADCRUMB(dev_priv);
		break;
	case I915_PARAM_CHIPSET_ID:
		value = dev->pci_device;
		break;
	case I915_PARAM_HAS_GEM:
		value = 1;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_PAGEFLIPPING:
		value = 1;
		break;
	case I915_PARAM_HAS_EXECBUF2:
		value = 1;
		break;
	case I915_PARAM_HAS_BSD:
		value = intel_ring_initialized(&dev_priv->ring[VCS]);
		break;
	case I915_PARAM_HAS_BLT:
		value = intel_ring_initialized(&dev_priv->ring[BCS]);
		break;
	case I915_PARAM_HAS_VEBOX:
		value = intel_ring_initialized(&dev_priv->ring[VECS]);
		break;
	case I915_PARAM_HAS_RELAXED_FENCING:
		value = 1;
		break;
	case I915_PARAM_HAS_COHERENT_RINGS:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		value = INTEL_INFO(dev)->gen >= 4;
		break;
	case I915_PARAM_HAS_RELAXED_DELTA:
		value = 1;
		break;
	case I915_PARAM_HAS_GEN7_SOL_RESET:
		value = 1;
		break;
	case I915_PARAM_HAS_LLC:
		value = HAS_LLC(dev);
		break;
	case I915_PARAM_HAS_WT:
		value = HAS_WT(dev);
		break;
	case I915_PARAM_HAS_ALIASING_PPGTT:
		value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
		break;
	case I915_PARAM_HAS_WAIT_TIMEOUT:
		value = 1;
		break;
	case I915_PARAM_HAS_SEMAPHORES:
		value = i915_semaphore_is_enabled(dev);
		break;
	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
		value = 1;
		break;
	case I915_PARAM_HAS_SECURE_BATCHES:
		value = capable(CAP_SYS_ADMIN);
		break;
	case I915_PARAM_HAS_PINNED_BATCHES:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_NO_RELOC:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_HANDLE_LUT:
		value = 1;
		break;
	default:
		DRM_DEBUG("Unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
		DRM_ERROR("DRM_COPY_TO_USER failed\n");
		return -EFAULT;
	}

	return 0;
}
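/*
 * Illustrative userspace-side sketch (an assumption about a typical
 * libdrm caller, not part of this file):
 *
 *	int id = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_CHIPSET_ID,
 *		.value = &id,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
 *		printf("chipset id 0x%04x\n", id);
 *
 * Each case above simply selects which integer is copied back to the
 * caller-supplied pointer.
 */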
static int i915_setparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_setparam_t *param = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
		break;
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
		break;
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0;
		break;
	case I915_SETPARAM_NUM_USED_FENCES:
		if (param->value > dev_priv->num_fence_regs ||
		    param->value < 0)
			return -EINVAL;
		/* Userspace can use first N regs */
		dev_priv->fence_reg_start = param->value;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown parameter %d\n",
				 param->param);
		return -EINVAL;
	}

	return 0;
}
static int i915_set_status_page(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_hws_addr_t *hws = data;
	struct intel_ring_buffer *ring;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!I915_NEED_GFX_HWS(dev))
		return -EINVAL;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		WARN(1, "tried to set status page when mode setting active\n");
		return 0;
	}

	DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);

	ring = LP_RING(dev_priv);
	ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);

	dev_priv->dri1.gfx_hws_cpu_addr =
		ioremap_wc(dev_priv->gtt.mappable_base + hws->addr, 4096);
	if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
		i915_dma_cleanup(dev);
		ring->status_page.gfx_addr = 0;
		DRM_ERROR("can not ioremap virtual address for"
			  " G33 hw status page\n");
		return -ENOMEM;
	}

	memset_io(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);

	DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
			 ring->status_page.gfx_addr);
	DRM_DEBUG_DRIVER("load hws at %p\n",
			 ring->status_page.page_addr);
	return 0;
}
static int i915_get_bridge_dev(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}
#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

#define DEVEN_REG 0x54
#define DEVEN_MCHBAR_EN (1 << 28)
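/*
 * Note (an inference from the constants above, not stated in the
 * original): MCHBAR is a 16KB (4*4096) window of memory-mapped chipset
 * registers. Pre-gen4 parts keep a 32-bit base at config offset 0x44
 * (MCHBAR_I915); gen4+ use a 64-bit base at 0x48 (MCHBAR_I965), which is
 * why the helpers below also access reg + 4 on gen4+.
 */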
/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	if (INTEL_INFO(dev)->gen >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		return ret;
	}

	if (INTEL_INFO(dev)->gen >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}
/* Setup MCHBAR if possible, return true if we should disable it again */
static void
intel_setup_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}
static void
intel_teardown_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev) || IS_I915GM(dev)) {
			pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
			temp &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
		} else {
			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
			temp &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}
/* true = enable decode, false = disable decoder */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_device *dev = cookie;

	intel_modeset_vga_set_state(dev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(dev->pdev, PCI_D0);
		i915_resume(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_err("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		i915_suspend(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}
static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	bool can_switch;

	spin_lock(&dev->count_lock);
	can_switch = (dev->open_count == 0);
	spin_unlock(&dev->count_lock);
	return can_switch;
}
static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
	.set_gpu_state = i915_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = i915_switcheroo_can_switch,
};
static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_parse_bios(dev);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

	/* If we have > 1 VGA cards, then we need to arbitrate access
	 * to the common VGA resources.
	 *
	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
	 * then we do not take part in VGA arbitration and the
	 * vga_client_register() fails with -ENODEV.
	 */
	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
	if (ret && ret != -ENODEV)
		goto out;

	intel_register_dsm_handler();

	ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
	if (ret)
		goto cleanup_vga_client;

	/* Initialise stolen first so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	ret = i915_gem_init_stolen(dev);
	if (ret)
		goto cleanup_vga_switcheroo;

	ret = drm_irq_install(dev);
	if (ret)
		goto cleanup_gem_stolen;

	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
	intel_modeset_init(dev);

	ret = i915_gem_init(dev);
	if (ret)
		goto cleanup_irq;

	INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);

	intel_modeset_gem_init(dev);

	/* Always safe in the mode setting case. */
	/* FIXME: do pre/post-mode set stuff in core KMS code */
	dev->vblank_disable_allowed = 1;
	if (INTEL_INFO(dev)->num_pipes == 0)
		return 0;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_gem;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(dev);

	/*
	 * Some ports require correctly set-up hpd registers for detection to
	 * work properly (leading to ghost connected connector status), e.g. VGA
	 * on gm45.  Hence we can only set up the initial fbdev config after hpd
	 * irqs are fully enabled. Now we should scan for the initial config
	 * only once hotplug handling is enabled, but due to screwed-up locking
	 * around kms/fbdev init we can't protect the fbdev initial config
	 * scanning against hotplug events. Hence do this first and ignore the
	 * tiny window where we will lose hotplug notifications.
	 */
	intel_fbdev_initial_config(dev);

	/* Only enable hotplug handling once the fbdev is fully set up. */
	dev_priv->enable_hotplug_processing = true;

	drm_kms_helper_poll_init(dev);

	return 0;

cleanup_gem:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	i915_gem_context_fini(dev);
	mutex_unlock(&dev->struct_mutex);
	i915_gem_cleanup_aliasing_ppgtt(dev);
	drm_mm_takedown(&dev_priv->gtt.base.mm);
cleanup_irq:
	drm_irq_uninstall(dev);
cleanup_gem_stolen:
	i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo:
	vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
	vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
	return ret;
}
int i915_master_create(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv;

	master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
	if (!master_priv)
		return -ENOMEM;

	master->driver_priv = master_priv;
	return 0;
}
void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv = master->driver_priv;

	if (!master_priv)
		return;

	kfree(master_priv);

	master->driver_priv = NULL;
}
static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	struct apertures_struct *ap;
	struct pci_dev *pdev = dev_priv->dev->pdev;
	bool primary;

	ap = alloc_apertures(1);
	if (!ap)
		return;

	ap->ranges[0].base = dev_priv->gtt.mappable_base;
	ap->ranges[0].size = dev_priv->gtt.mappable_end;

	primary =
		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

	remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

	kfree(ap);
}
static void i915_dump_device_info(struct drm_i915_private *dev_priv)
{
	const struct intel_device_info *info = dev_priv->info;

#define PRINT_S(name) "%s"
#define SEP_EMPTY
#define PRINT_FLAG(name) info->name ? #name "," : ""
#define SEP_COMMA ,
	DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags="
			 DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
			 info->gen,
			 dev_priv->dev->pdev->device,
			 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
#undef PRINT_S
#undef SEP_EMPTY
#undef PRINT_FLAG
#undef SEP_COMMA
}
/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv;
	struct intel_device_info *info;
	int ret = 0, mmio_bar, mmio_size;
	uint32_t aperture_size;

	info = (struct intel_device_info *) flags;

	/* Refuse to load on gen6+ without kms enabled. */
	if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	/* i915 has 4 more counters */
	dev->counters += 4;
	dev->types[6] = _DRM_STAT_IRQ;
	dev->types[7] = _DRM_STAT_PRIMARY;
	dev->types[8] = _DRM_STAT_SECONDARY;
	dev->types[9] = _DRM_STAT_DMA;

	dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
	if (dev_priv == NULL)
		return -ENOMEM;

	dev->dev_private = (void *)dev_priv;
	dev_priv->dev = dev;
	dev_priv->info = info;

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	spin_lock_init(&dev_priv->backlight.lock);
	spin_lock_init(&dev_priv->uncore.lock);
	spin_lock_init(&dev_priv->mm.object_stat_lock);
	mutex_init(&dev_priv->dpio_lock);
	mutex_init(&dev_priv->rps.hw_lock);
	mutex_init(&dev_priv->modeset_restore_lock);

	mutex_init(&dev_priv->pc8.lock);
	dev_priv->pc8.requirements_met = false;
	dev_priv->pc8.gpu_idle = false;
	dev_priv->pc8.irqs_disabled = false;
	dev_priv->pc8.enabled = false;
	dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
	INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);

	i915_dump_device_info(dev_priv);

	/* Not all pre-production machines fall into this category, only the
	 * very first ones. Almost everything should work, except for maybe
	 * suspend/resume. And we don't implement workarounds that affect only
	 * pre-production machines. */
	if (IS_HSW_EARLY_SDV(dev))
		DRM_INFO("This is an early pre-production Haswell machine. "
			 "It may not be fully functional.\n");

	if (i915_get_bridge_dev(dev)) {
		ret = -EIO;
		goto free_priv;
	}

	mmio_bar = IS_GEN2(dev) ? 1 : 0;
	/* Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
	 * the register BAR remains the same size for all the earlier
	 * generations up to Ironlake.
	 */
	if (info->gen < 5)
		mmio_size = 512*1024;
	else
		mmio_size = 2*1024*1024;

	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
	if (!dev_priv->regs) {
		DRM_ERROR("failed to map registers\n");
		ret = -EIO;
		goto put_bridge;
	}

	intel_uncore_early_sanitize(dev);

	if (IS_HASWELL(dev) && (I915_READ(HSW_EDRAM_PRESENT) == 1)) {
		/* The docs do not explain exactly how the calculation can be
		 * made. It is somewhat guessable, but for now, it's always
		 * 128MB.
		 * NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
		dev_priv->ellc_size = 128;
		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
	}

	ret = i915_gem_gtt_init(dev);
	if (ret)
		goto put_bridge;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		i915_kick_out_firmware_fb(dev_priv);

	pci_set_master(dev->pdev);

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN2(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));

	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));

	aperture_size = dev_priv->gtt.mappable_end;

	dev_priv->gtt.mappable =
		io_mapping_create_wc(dev_priv->gtt.mappable_base,
				     aperture_size);
	if (dev_priv->gtt.mappable == NULL) {
		ret = -EIO;
		goto out_rmmap;
	}

	dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
					      aperture_size);

	/* The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_gem_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time.  Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL) {
		DRM_ERROR("Failed to create our workqueue.\n");
		ret = -ENOMEM;
		goto out_mtrrfree;
	}

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev);

	intel_irq_init(dev);
	intel_uncore_sanitize(dev);
	intel_uncore_init(dev);

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev);
	intel_setup_gmbus(dev);
	intel_opregion_setup(dev);

	intel_setup_bios(dev);

	i915_gem_load(dev);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyways to avoid
	 * stuck interrupts on some machines.
	 */
	if (!IS_I945G(dev) && !IS_I945GM(dev))
		pci_enable_msi(dev->pdev);

	dev_priv->num_plane = 1;
	if (IS_VALLEYVIEW(dev))
		dev_priv->num_plane = 2;

	if (INTEL_INFO(dev)->num_pipes) {
		ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
		if (ret)
			goto out_gem_unload;
	}

	if (HAS_POWER_WELL(dev))
		i915_init_power_well(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = i915_load_modeset_init(dev);
		if (ret < 0) {
			DRM_ERROR("failed to init modeset\n");
			goto out_gem_unload;
		}
	} else {
		/* Start out suspended in ums mode. */
		dev_priv->ums.mm_suspended = 1;
	}

	i915_setup_sysfs(dev);

	if (INTEL_INFO(dev)->num_pipes) {
		/* Must be done after probing outputs */
		intel_opregion_init(dev);
		acpi_video_register();
	}

	if (IS_GEN5(dev))
		intel_gpu_ips_init(dev_priv);

	return 0;

out_gem_unload:
	if (dev_priv->mm.inactive_shrinker.scan_objects)
		unregister_shrinker(&dev_priv->mm.inactive_shrinker);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);
	pm_qos_remove_request(&dev_priv->pm_qos);
	destroy_workqueue(dev_priv->wq);
out_mtrrfree:
	arch_phys_wc_del(dev_priv->gtt.mtrr);
	io_mapping_free(dev_priv->gtt.mappable);
	dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
out_rmmap:
	pci_iounmap(dev->pdev, dev_priv->regs);
put_bridge:
	pci_dev_put(dev_priv->bridge_dev);
free_priv:
	kfree(dev_priv);
	return ret;
}
int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	intel_gpu_ips_teardown();

	if (HAS_POWER_WELL(dev)) {
		/* The i915.ko module is still not prepared to be loaded when
		 * the power well is not enabled, so just enable it in case
		 * we're going to unload/reload. */
		intel_set_power_well(dev, true);
		i915_remove_power_well(dev);
	}

	i915_teardown_sysfs(dev);

	if (dev_priv->mm.inactive_shrinker.scan_objects)
		unregister_shrinker(&dev_priv->mm.inactive_shrinker);

	mutex_lock(&dev->struct_mutex);
	ret = i915_gpu_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
	i915_gem_retire_requests(dev);
	mutex_unlock(&dev->struct_mutex);

	/* Cancel the retire work handler, which should be idle now. */
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);

	io_mapping_free(dev_priv->gtt.mappable);
	arch_phys_wc_del(dev_priv->gtt.mtrr);

	acpi_video_unregister();

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_fbdev_fini(dev);
		intel_modeset_cleanup(dev);
		cancel_work_sync(&dev_priv->console_resume_work);

		/*
		 * free the memory space allocated for the child device
		 * config parsed from VBT
		 */
		if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
			kfree(dev_priv->vbt.child_dev);
			dev_priv->vbt.child_dev = NULL;
			dev_priv->vbt.child_dev_num = 0;
		}

		vga_switcheroo_unregister_client(dev->pdev);
		vga_client_register(dev->pdev, NULL, NULL, NULL);
	}

	/* Free error state after interrupts are fully disabled. */
	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
	cancel_work_sync(&dev_priv->gpu_error.work);
	i915_destroy_error_state(dev);

	cancel_delayed_work_sync(&dev_priv->pc8.enable_work);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_opregion_fini(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Flush any outstanding unpin_work. */
		flush_workqueue(dev_priv->wq);

		mutex_lock(&dev->struct_mutex);
		i915_gem_free_all_phys_object(dev);
		i915_gem_cleanup_ringbuffer(dev);
		i915_gem_context_fini(dev);
		mutex_unlock(&dev->struct_mutex);
		i915_gem_cleanup_aliasing_ppgtt(dev);
		i915_gem_cleanup_stolen(dev);

		if (!I915_NEED_GFX_HWS(dev))
			i915_free_hws(dev);
	}

	list_del(&dev_priv->gtt.base.global_link);
	WARN_ON(!list_empty(&dev_priv->vm_list));
	drm_mm_takedown(&dev_priv->gtt.base.mm);
	if (dev_priv->regs != NULL)
		pci_iounmap(dev->pdev, dev_priv->regs);

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);

	destroy_workqueue(dev_priv->wq);
	pm_qos_remove_request(&dev_priv->pm_qos);

	dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);

	if (dev_priv->slab)
		kmem_cache_destroy(dev_priv->slab);

	pci_dev_put(dev_priv->bridge_dev);
	kfree(dev->dev_private);

	return 0;
}
int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;

	DRM_DEBUG_DRIVER("\n");
	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);

	idr_init(&file_priv->context_idr);

	return 0;
}
/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
void i915_driver_lastclose(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* On gen6+ we refuse to init without kms enabled, but then the drm core
	 * goes right around and calls lastclose. Check for this and don't clean
	 * up anything. */
	if (!dev_priv)
		return;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_fb_restore_mode(dev);
		vga_switcheroo_process_delayed_switch();
		return;
	}

	i915_gem_lastclose(dev);

	i915_dma_cleanup(dev);
}
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
	mutex_lock(&dev->struct_mutex);
	i915_gem_context_close(dev, file_priv);
	i915_gem_release(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);
}
void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	kfree(file_priv);
}
const struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
};

int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
/*
 * This is really ugly: Because old userspace abused the linux agp interface to
 * manage the gtt, we need to claim that all intel devices are agp.  For
 * otherwise the drm core refuses to initialize the agp support code.
 */
int i915_driver_device_is_agp(struct drm_device * dev)
{
	return 1;
}