/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "drmP.h"
#include "drm.h"
#include "drm_crtc_helper.h"
#include "drm_fb_helper.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/pci.h>
#include <linux/vgaarb.h>
#include <linux/acpi.h>
#include <linux/pnp.h>
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <acpi/video.h>
static void i915_write_hws_pga(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 addr;

	addr = dev_priv->status_page_dmah->busaddr;
	if (INTEL_INFO(dev)->gen >= 4)
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
	I915_WRITE(HWS_PGA, addr);
}
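/* Note: on gen4+ the register write above also packs bits 32:35 of the
 * status page bus address into bits 7:4 of HWS_PGA.  As an illustrative
 * example (hypothetical address), a DMA handle of 0x123456000 would be
 * written as 0x23456010, the 0x10 coming from bit 32 of the handle.
 */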
/**
 * Sets up the hardware status page for devices that need a physical address
 * in the register.
 */
static int i915_init_phys_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* Program Hardware Status Page */
	dev_priv->status_page_dmah =
		drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);

	if (!dev_priv->status_page_dmah) {
		DRM_ERROR("Can not allocate hardware status page\n");
		return -ENOMEM;
	}

	memset_io((void __force __iomem *)dev_priv->status_page_dmah->vaddr,
		  0, PAGE_SIZE);

	i915_write_hws_pga(dev);

	DRM_DEBUG_DRIVER("Enabled hardware status page\n");
	return 0;
}
/**
 * Frees the hardware status page, whether it's a physical address or a virtual
 * address set up by the X Server.
 */
static void i915_free_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	if (dev_priv->status_page_dmah) {
		drm_pci_free(dev, dev_priv->status_page_dmah);
		dev_priv->status_page_dmah = NULL;
	}

	if (ring->status_page.gfx_addr) {
		ring->status_page.gfx_addr = 0;
		drm_core_ioremapfree(&dev_priv->hws_map, dev);
	}

	/* Need to rewrite hardware status page */
	I915_WRITE(HWS_PGA, 0x1ffff000);
}
void i915_kernel_lost_context(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	/*
	 * We should never lose context on the ring with modesetting
	 * as we don't expose it to userspace
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
	ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->size;

	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
	if (ring->head == ring->tail && master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}
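/* Note on the arithmetic above: ring->space is the number of free bytes
 * between the software tail and the hardware head, wrapped modulo the ring
 * size; the 8-byte slack keeps the tail from ever advancing flush against
 * the head, so an empty ring stays distinguishable from a full one.
 */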
static int i915_dma_cleanup(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	mutex_lock(&dev->struct_mutex);
	for (i = 0; i < I915_NUM_RINGS; i++)
		intel_cleanup_ring_buffer(&dev_priv->ring[i]);
	mutex_unlock(&dev->struct_mutex);

	/* Clear the HWS virtual address at teardown */
	if (I915_NEED_GFX_HWS(dev))
		i915_free_hws(dev);

	return 0;
}
static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret;

	master_priv->sarea = drm_getsarea(dev);
	if (master_priv->sarea) {
		master_priv->sarea_priv = (drm_i915_sarea_t *)
			((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
	} else {
		DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
	}

	if (init->ring_size != 0) {
		if (LP_RING(dev_priv)->obj != NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("Client tried to initialize ringbuffer in "
				  "GEM mode\n");
			return -EINVAL;
		}

		ret = intel_render_ring_init_dri(dev,
						 init->ring_start,
						 init->ring_size);
		if (ret) {
			i915_dma_cleanup(dev);
			return ret;
		}
	}

	dev_priv->cpp = init->cpp;
	dev_priv->back_offset = init->back_offset;
	dev_priv->front_offset = init->front_offset;
	dev_priv->current_page = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->pf_current_page = 0;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;

	return 0;
}
static int i915_dma_resume(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	DRM_DEBUG_DRIVER("%s\n", __func__);

	if (ring->map.handle == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	/* Program Hardware Status Page */
	if (!ring->status_page.page_addr) {
		DRM_ERROR("Can not find hardware status page\n");
		return -EINVAL;
	}
	DRM_DEBUG_DRIVER("hw status page @ %p\n",
			 ring->status_page.page_addr);
	if (ring->status_page.gfx_addr != 0)
		intel_ring_setup_status_page(ring);
	else
		i915_write_hws_pga(dev);

	DRM_DEBUG_DRIVER("Enabled hardware status page\n");

	return 0;
}
static int i915_dma_init(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_init_t *init = data;
	int retcode = 0;

	switch (init->func) {
	case I915_INIT_DMA:
		retcode = i915_initialize(dev, init);
		break;
	case I915_CLEANUP_DMA:
		retcode = i915_dma_cleanup(dev);
		break;
	case I915_RESUME_DMA:
		retcode = i915_dma_resume(dev);
		break;
	default:
		retcode = -EINVAL;
		break;
	}

	return retcode;
}
/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
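/* Worked example (illustrative value only): a 2D header such as 0x54000004
 * carries 0x2 in bits 31:29, so it is sized as (cmd & 0xff) + 2 = 6 dwords
 * and the checker resumes at the header six dwords later.
 */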
static int validate_cmd(int cmd)
{
	switch (((cmd >> 29) & 0x7)) {
	case 0x0:
		switch ((cmd >> 23) & 0x3f) {
		case 0x0:
			return 1;	/* MI_NOOP */
		case 0x4:
			return 1;	/* MI_FLUSH */
		default:
			return 0;	/* disallow everything else */
		}
		break;
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3:
		if (((cmd >> 24) & 0x1f) <= 0x18)
			return 1;

		switch ((cmd >> 24) & 0x1f) {
		case 0x1c:
			return 1;
		case 0x1d:
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			if (cmd & (1 << 23))
				return (cmd & 0xffff) + 1;
			else
				return 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			else if (cmd & (1 << 17))	/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				else
					return (((cmd & 0xffff) + 1) / 2) + 1;
			else
				return 2;	/* indirect sequential */
		default:
			return 0;
		}
	default:
		return 0;
	}

	return 0;
}
static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, ret;

	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
		return -EINVAL;

	for (i = 0; i < dwords;) {
		int sz = validate_cmd(buffer[i]);
		if (sz == 0 || i + sz > dwords)
			return -EINVAL;
		i += sz;
	}

	ret = BEGIN_LP_RING((dwords+1)&~1);
	if (ret)
		return ret;

	for (i = 0; i < dwords; i++)
		OUT_RING(buffer[i]);
	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}
int
i915_emit_box(struct drm_device *dev,
	      struct drm_clip_rect *box,
	      int DR1, int DR4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
	    box->y2 <= 0 || box->x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box->x1, box->y1, box->x2, box->y2);
		return -EINVAL;
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		ret = BEGIN_LP_RING(4);
		if (ret)
			return ret;

		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
	} else {
		ret = BEGIN_LP_RING(6);
		if (ret)
			return ret;

		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
	}
	ADVANCE_LP_RING();

	return 0;
}
/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit. For now, do it in both places:
 */

static void i915_emit_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	dev_priv->counter++;
	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->counter;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}
}
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
				   drm_i915_cmdbuffer_t *cmd,
				   struct drm_clip_rect *cliprects,
				   void *cmdbuf)
{
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, &cliprects[i],
					    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb(dev);
	return 0;
}
static int i915_dispatch_batchbuffer(struct drm_device * dev,
				     drm_i915_batchbuffer_t * batch,
				     struct drm_clip_rect *cliprects)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int nbox = batch->num_cliprects;
	int i, count, ret;

	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;
	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, &cliprects[i],
					    batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

		if (!IS_I830(dev) && !IS_845G(dev)) {
			ret = BEGIN_LP_RING(2);
			if (ret)
				return ret;

			if (INTEL_INFO(dev)->gen >= 4) {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
				OUT_RING(batch->start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			}
		} else {
			ret = BEGIN_LP_RING(4);
			if (ret)
				return ret;

			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
		}
		ADVANCE_LP_RING();
	}

	if (IS_G4X(dev) || IS_GEN5(dev)) {
		if (BEGIN_LP_RING(2) == 0) {
			OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
			OUT_RING(MI_NOOP);
			ADVANCE_LP_RING();
		}
	}

	i915_emit_breadcrumb(dev);
	return 0;
}
static int i915_dispatch_flip(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv =
		dev->primary->master->driver_priv;
	int ret;

	if (!master_priv->sarea_priv)
		return -EINVAL;

	DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
			 __func__,
			 dev_priv->current_page,
			 master_priv->sarea_priv->pf_current_page);

	i915_kernel_lost_context(dev);

	ret = BEGIN_LP_RING(10);
	if (ret)
		return ret;

	OUT_RING(MI_FLUSH | MI_READ_FLUSH);
	OUT_RING(0);

	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
	if (dev_priv->current_page == 0) {
		OUT_RING(dev_priv->back_offset);
		dev_priv->current_page = 1;
	} else {
		OUT_RING(dev_priv->front_offset);
		dev_priv->current_page = 0;
	}
	OUT_RING(0);

	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);

	ADVANCE_LP_RING();

	master_priv->sarea_priv->last_enqueue = dev_priv->counter++;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
	return 0;
}
static int i915_quiescent(struct drm_device *dev)
{
	struct intel_ring_buffer *ring = LP_RING(dev->dev_private);

	i915_kernel_lost_context(dev);
	return intel_wait_ring_idle(ring);
}
static int i915_flush_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	int ret;

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	ret = i915_quiescent(dev);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
static int i915_batchbuffer(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_batchbuffer_t *batch = data;
	int ret;
	struct drm_clip_rect *cliprects = NULL;

	if (!dev_priv->allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return -EINVAL;
	}

	DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
			 batch->start, batch->used, batch->num_cliprects);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (batch->num_cliprects < 0)
		return -EINVAL;

	if (batch->num_cliprects) {
		cliprects = kcalloc(batch->num_cliprects,
				    sizeof(struct drm_clip_rect),
				    GFP_KERNEL);
		if (cliprects == NULL)
			return -ENOMEM;

		ret = copy_from_user(cliprects, batch->cliprects,
				     batch->num_cliprects *
				     sizeof(struct drm_clip_rect));
		if (ret != 0) {
			ret = -EFAULT;
			goto fail_free;
		}
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
	mutex_unlock(&dev->struct_mutex);

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_free:
	kfree(cliprects);

	return ret;
}
static int i915_cmdbuffer(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_cmdbuffer_t *cmdbuf = data;
	struct drm_clip_rect *cliprects = NULL;
	void *batch_data;
	int ret;

	DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
			 cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (cmdbuf->num_cliprects < 0)
		return -EINVAL;

	batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
	if (batch_data == NULL)
		return -ENOMEM;

	ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
	if (ret != 0) {
		ret = -EFAULT;
		goto fail_batch_free;
	}

	if (cmdbuf->num_cliprects) {
		cliprects = kcalloc(cmdbuf->num_cliprects,
				    sizeof(struct drm_clip_rect), GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto fail_batch_free;
		}

		ret = copy_from_user(cliprects, cmdbuf->cliprects,
				     cmdbuf->num_cliprects *
				     sizeof(struct drm_clip_rect));
		if (ret != 0) {
			ret = -EFAULT;
			goto fail_clip_free;
		}
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
	mutex_unlock(&dev->struct_mutex);
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
		goto fail_clip_free;
	}

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_clip_free:
	kfree(cliprects);
fail_batch_free:
	kfree(batch_data);

	return ret;
}
static int i915_flip_bufs(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	int ret;

	DRM_DEBUG_DRIVER("%s\n", __func__);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_flip(dev);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
static int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_getparam_t *param = data;
	int value;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
		value = dev->pdev->irq ? 1 : 0;
		break;
	case I915_PARAM_ALLOW_BATCHBUFFER:
		value = dev_priv->allow_batchbuffer ? 1 : 0;
		break;
	case I915_PARAM_LAST_DISPATCH:
		value = READ_BREADCRUMB(dev_priv);
		break;
	case I915_PARAM_CHIPSET_ID:
		value = dev->pci_device;
		break;
	case I915_PARAM_HAS_GEM:
		value = dev_priv->has_gem;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_PAGEFLIPPING:
		value = 1;
		break;
	case I915_PARAM_HAS_EXECBUF2:
		/* depends on GEM */
		value = dev_priv->has_gem;
		break;
	case I915_PARAM_HAS_BSD:
		value = HAS_BSD(dev);
		break;
	case I915_PARAM_HAS_BLT:
		value = HAS_BLT(dev);
		break;
	case I915_PARAM_HAS_RELAXED_FENCING:
		value = 1;
		break;
	case I915_PARAM_HAS_COHERENT_RINGS:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		value = INTEL_INFO(dev)->gen >= 4;
		break;
	case I915_PARAM_HAS_RELAXED_DELTA:
		value = 1;
		break;
	default:
		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
				 param->param);
		return -EINVAL;
	}

	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
		DRM_ERROR("DRM_COPY_TO_USER failed\n");
		return -EFAULT;
	}

	return 0;
}
static int i915_setparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_setparam_t *param = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
		break;
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
		dev_priv->tex_lru_log_granularity = param->value;
		break;
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		dev_priv->allow_batchbuffer = param->value;
		break;
	case I915_SETPARAM_NUM_USED_FENCES:
		if (param->value > dev_priv->num_fence_regs ||
		    param->value < 0)
			return -EINVAL;
		/* Userspace can use first N regs */
		dev_priv->fence_reg_start = param->value;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown parameter %d\n",
				 param->param);
		return -EINVAL;
	}

	return 0;
}
static int i915_set_status_page(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_hws_addr_t *hws = data;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	if (!I915_NEED_GFX_HWS(dev))
		return -EINVAL;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		WARN(1, "tried to set status page when mode setting active\n");
		return 0;
	}

	DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);

	ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);

	dev_priv->hws_map.offset = dev->agp->base + hws->addr;
	dev_priv->hws_map.size = 4*1024;
	dev_priv->hws_map.type = 0;
	dev_priv->hws_map.flags = 0;
	dev_priv->hws_map.mtrr = 0;

	drm_core_ioremap_wc(&dev_priv->hws_map, dev);
	if (dev_priv->hws_map.handle == NULL) {
		i915_dma_cleanup(dev);
		ring->status_page.gfx_addr = 0;
		DRM_ERROR("can not ioremap virtual address for"
			  " G33 hw status page\n");
		return -ENOMEM;
	}

	ring->status_page.page_addr =
		(void __force __iomem *)dev_priv->hws_map.handle;
	memset_io(ring->status_page.page_addr, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);

	DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
			 ring->status_page.gfx_addr);
	DRM_DEBUG_DRIVER("load hws at %p\n",
			 ring->status_page.page_addr);
	return 0;
}
static int i915_get_bridge_dev(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}
#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

#define DEVEN_REG 0x54
#define DEVEN_MCHBAR_EN (1 << 28)
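/* MCHBAR is a window onto the memory controller's registers, pointed to by
 * a BAR in the host bridge at 0000:00:00.0: config offset 0x44 before gen4,
 * or a 64-bit value at 0x48 from gen4 onwards.  On i915G/GM the enable bit
 * lives in the DEVEN register rather than in bit 0 of the BAR itself, which
 * is why the helpers below special-case those chips.
 */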
/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	if (INTEL_INFO(dev)->gen >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		return ret;
	}

	if (INTEL_INFO(dev)->gen >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}
/* Setup MCHBAR if possible, return true if we should disable it again */
static void
intel_setup_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}
static void
intel_teardown_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev) || IS_I915GM(dev)) {
			pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
			temp &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
		} else {
			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
			temp &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}
#define PTE_ADDRESS_MASK		0xfffff000
#define PTE_ADDRESS_MASK_HIGH		0x000000f0 /* i915+ */
#define PTE_MAPPING_TYPE_UNCACHED	(0 << 1)
#define PTE_MAPPING_TYPE_DCACHE		(1 << 1) /* i830 only */
#define PTE_MAPPING_TYPE_CACHED		(3 << 1)
#define PTE_MAPPING_TYPE_MASK		(3 << 1)
#define PTE_VALID			(1 << 0)
/**
 * i915_stolen_to_phys - take an offset into stolen memory and turn it into
 *                       a physical one
 * @dev: drm device
 * @offset: address to translate
 *
 * Some chip functions require allocations from stolen space and need the
 * physical address of the memory in question.
 */
static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct pci_dev *pdev = dev_priv->bridge_dev;
	u32 base;

#if 0
	/* On the machines I have tested the Graphics Base of Stolen Memory
	 * is unreliable, so compute the base by subtracting the stolen memory
	 * from the Top of Low Usable DRAM which is where the BIOS places
	 * the graphics stolen memory.
	 */
	if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
		/* top 32bits are reserved = 0 */
		pci_read_config_dword(pdev, 0xA4, &base);
	} else {
		/* XXX presume 8xx is the same as i915 */
		pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
	}
#else
	if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
		u16 val;
		pci_read_config_word(pdev, 0xb0, &val);
		base = val >> 4 << 20;
	} else {
		u8 val;
		pci_read_config_byte(pdev, 0x9c, &val);
		base = val >> 3 << 27;
	}
	base -= dev_priv->mm.gtt->stolen_size;
#endif

	return base + offset;
}
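/* Worked example (illustrative register values): if the TOLUD word at 0xb0
 * reads 0x8000 on a gen4+ part, base = (0x8000 >> 4) << 20 = 0x80000000;
 * with a 64MB stolen_size the stolen region then starts at 0x7c000000, and
 * an offset is translated by simple addition from there.
 */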
static void i915_warn_stolen(struct drm_device *dev)
{
	DRM_ERROR("not enough stolen space for compressed buffer, disabling\n");
	DRM_ERROR("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
}
static void i915_setup_compression(struct drm_device *dev, int size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
	unsigned long cfb_base;
	unsigned long ll_base = 0;

	/* Just in case the BIOS is doing something questionable. */
	intel_disable_fbc(dev);

	compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
	if (compressed_fb)
		compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
	if (!compressed_fb)
		goto err;

	cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
	if (!cfb_base)
		goto err_fb;

	if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) {
		compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
						    4096, 4096, 0);
		if (compressed_llb)
			compressed_llb = drm_mm_get_block(compressed_llb,
							  4096, 4096);
		if (!compressed_llb)
			goto err_fb;

		ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
		if (!ll_base)
			goto err_llb;
	}

	dev_priv->cfb_size = size;

	dev_priv->compressed_fb = compressed_fb;
	if (HAS_PCH_SPLIT(dev))
		I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
	else if (IS_GM45(dev)) {
		I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
	} else {
		I915_WRITE(FBC_CFB_BASE, cfb_base);
		I915_WRITE(FBC_LL_BASE, ll_base);
		dev_priv->compressed_llb = compressed_llb;
	}

	DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
		      cfb_base, ll_base, size >> 20);
	return;

err_llb:
	drm_mm_put_block(compressed_llb);
err_fb:
	drm_mm_put_block(compressed_fb);
err:
	dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
	i915_warn_stolen(dev);
}
static void i915_cleanup_compression(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	drm_mm_put_block(dev_priv->compressed_fb);
	if (dev_priv->compressed_llb)
		drm_mm_put_block(dev_priv->compressed_llb);
}
/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_device *dev = cookie;

	intel_modeset_vga_set_state(dev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
	if (state == VGA_SWITCHEROO_ON) {
		printk(KERN_INFO "i915: switched on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(dev->pdev, PCI_D0);
		i915_resume(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		printk(KERN_ERR "i915: switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		i915_suspend(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}
static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	bool can_switch;

	spin_lock(&dev->count_lock);
	can_switch = (dev->open_count == 0);
	spin_unlock(&dev->count_lock);
	return can_switch;
}
static int i915_load_gem_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long prealloc_size, gtt_size, mappable_size;
	int ret;

	prealloc_size = dev_priv->mm.gtt->stolen_size;
	gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
	mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;

	/* Basic memrange allocator for stolen space */
	drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);

	/* Let GEM Manage all of the aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently
	 * prefetches past the end of the object, and we've seen multiple
	 * hangs with the GPU head pointer stuck in a batchbuffer bound
	 * at the last page of the aperture.  One page should be enough to
	 * keep any prefetching inside of the aperture.
	 */
	i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_init_ringbuffer(dev);
	mutex_unlock(&dev->struct_mutex);
	if (ret)
		return ret;

	/* Try to set up FBC with a reasonable compressed buffer size */
	if (I915_HAS_FBC(dev) && i915_powersave) {
		int cfb_size;

		/* Leave 1M for line length buffer & misc. */

		/* Try to get a 32M buffer... */
		if (prealloc_size > (36*1024*1024))
			cfb_size = 32*1024*1024;
		else /* fall back to 7/8 of the stolen space */
			cfb_size = prealloc_size * 7 / 8;
		i915_setup_compression(dev, cfb_size);
	}
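	/* E.g. (illustrative sizes): 64M of stolen memory clears the 36M
	 * threshold above, so cfb_size is capped at 32M; with only 32M
	 * stolen, the 7/8 fallback yields a 28M compressed buffer.
	 */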
	/* Allow hardware batchbuffers unless told otherwise. */
	dev_priv->allow_batchbuffer = 1;

	return 0;
}
static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_parse_bios(dev);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

	/* If we have > 1 VGA cards, then we need to arbitrate access
	 * to the common VGA resources.
	 *
	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
	 * then we do not take part in VGA arbitration and the
	 * vga_client_register() fails with -ENODEV.
	 */
	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
	if (ret && ret != -ENODEV)
		goto out;

	intel_register_dsm_handler();

	ret = vga_switcheroo_register_client(dev->pdev,
					     i915_switcheroo_set_state,
					     NULL,
					     i915_switcheroo_can_switch);
	if (ret)
		goto cleanup_vga_client;

	/* IIR "flip pending" bit means done if this bit is set */
	if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE))
		dev_priv->flip_pending_is_done = true;

	intel_modeset_init(dev);

	ret = i915_load_gem_init(dev);
	if (ret)
		goto cleanup_vga_switcheroo;

	intel_modeset_gem_init(dev);

	ret = drm_irq_install(dev);
	if (ret)
		goto cleanup_gem;

	/* Always safe in the mode setting case. */
	/* FIXME: do pre/post-mode set stuff in core KMS code */
	dev->vblank_disable_allowed = 1;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_irq;

	drm_kms_helper_poll_init(dev);

	/* We're off and running w/KMS */
	dev_priv->mm.suspended = 0;

	return 0;

cleanup_irq:
	drm_irq_uninstall(dev);
cleanup_gem:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	mutex_unlock(&dev->struct_mutex);
cleanup_vga_switcheroo:
	vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
	vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
	return ret;
}
int i915_master_create(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv;

	master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
	if (!master_priv)
		return -ENOMEM;

	master->driver_priv = master_priv;
	return 0;
}

void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv = master->driver_priv;

	if (!master_priv)
		return;

	kfree(master_priv);

	master->driver_priv = NULL;
}
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->c_m = 1;
	} else {
		dev_priv->c_m = 2;
	}
}
static const struct cparams {
	u16 i;
	u16 t;
	u16 m;
	u16 c;
} cparams[] = {
	{ 1, 1333, 301, 28664 },
	{ 1, 1066, 294, 24460 },
	{ 1, 800, 294, 25192 },
	{ 0, 1333, 276, 27605 },
	{ 0, 1066, 276, 27605 },
	{ 0, 800, 231, 23784 },
};
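/* Each cparams entry maps a (c_m, r_t) pair, as detected by
 * i915_ironlake_get_mem_freq(), to the slope m and intercept c that
 * i915_chipset_val() below uses to turn the raw energy counters into a
 * power estimate.
 */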
unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
	u64 total_count, diff, ret;
	u32 count1, count2, count3, m = 0, c = 0;
	unsigned long now = jiffies_to_msecs(jiffies), diff1;
	int i;

	diff1 = now - dev_priv->last_time1;

	count1 = I915_READ(DMIEC);
	count2 = I915_READ(DDREC);
	count3 = I915_READ(CSIEC);

	total_count = count1 + count2 + count3;

	/* FIXME: handle per-counter overflow */
	if (total_count < dev_priv->last_count1) {
		diff = ~0UL - dev_priv->last_count1;
		diff += total_count;
	} else {
		diff = total_count - dev_priv->last_count1;
	}

	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
		if (cparams[i].i == dev_priv->c_m &&
		    cparams[i].t == dev_priv->r_t) {
			m = cparams[i].m;
			c = cparams[i].c;
			break;
		}
	}

	diff = div_u64(diff, diff1);
	ret = ((m * diff) + c);
	ret = div_u64(ret, 10);

	dev_priv->last_count1 = total_count;
	dev_priv->last_time1 = now;

	return ret;
}
unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
{
	unsigned long m, x, b;
	u32 tsfs;

	tsfs = I915_READ(TSFS);

	m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
	x = I915_READ8(TR1);

	b = tsfs & TSFS_INTR_MASK;

	return ((m * x) / 127) - b;
}
static u16
pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
	static const struct v_table {
		u16 vd; /* in .1 mil */
		u16 vm; /* in .1 mil */
	} v_table[] = {
		/* 128-entry PXVID -> voltage table elided in this excerpt */
	};

	if (dev_priv->info->is_mobile)
		return v_table[pxvid].vm;
	else
		return v_table[pxvid].vd;
}
void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	struct timespec now, diff1;
	u64 diff;
	unsigned long diffms;
	u32 count;

	getrawmonotonic(&now);
	diff1 = timespec_sub(now, dev_priv->last_time2);

	/* Don't divide by 0 */
	diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
	if (!diffms)
		return;

	count = I915_READ(GFXEC);

	if (count < dev_priv->last_count2) {
		diff = ~0UL - dev_priv->last_count2;
		diff += count;
	} else {
		diff = count - dev_priv->last_count2;
	}

	dev_priv->last_count2 = count;
	dev_priv->last_time2 = now;

	/* More magic constants... */
	diff = diff * 1181;
	diff = div_u64(diff, diffms * 10);
	dev_priv->gfx_power = diff;
}
unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
	unsigned long t, corr, state1, corr2, state2;
	u32 pxvid, ext_v;

	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
	pxvid = (pxvid >> 24) & 0x7f;
	ext_v = pvid_to_extvid(dev_priv, pxvid);

	state1 = ext_v;

	t = i915_mch_val(dev_priv);

	/* Revel in the empirically derived constants */

	/* Correction factor in 1/100000 units */
	if (t > 80)
		corr = ((t * 2349) + 135940);
	else if (t >= 50)
		corr = ((t * 964) + 29317);
	else /* < 50 */
		corr = ((t * 301) + 1004);

	corr = corr * ((150142 * state1) / 10000 - 78642);
	corr /= 100000;
	corr2 = (corr * dev_priv->corr);

	state2 = (corr2 * state1) / 10000;
	state2 /= 100; /* convert to mW */

	i915_update_gfx_val(dev_priv);

	return dev_priv->gfx_power + state2;
}
/* Global for IPS driver to get at the current i915 device */
static struct drm_i915_private *i915_mch_dev;
/*
 * Lock protecting IPS related data structures
 *   - i915_mch_dev
 *   - dev_priv->max_delay
 *   - dev_priv->min_delay
 *   - dev_priv->fmax
 *   - dev_priv->gpu_busy
 */
static DEFINE_SPINLOCK(mchdev_lock);

/**
 * i915_read_mch_val - return value for IPS use
 *
 * Calculate and return a value for the IPS driver to use when deciding whether
 * we have thermal and power headroom to increase CPU or GPU power budget.
 */
unsigned long i915_read_mch_val(void)
{
	struct drm_i915_private *dev_priv;
	unsigned long chipset_val, graphics_val, ret = 0;

	spin_lock(&mchdev_lock);
	if (!i915_mch_dev)
		goto out_unlock;
	dev_priv = i915_mch_dev;

	chipset_val = i915_chipset_val(dev_priv);
	graphics_val = i915_gfx_val(dev_priv);

	ret = chipset_val + graphics_val;

out_unlock:
	spin_unlock(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);
/**
 * i915_gpu_raise - raise GPU frequency limit
 *
 * Raise the limit; IPS indicates we have thermal headroom.
 */
bool i915_gpu_raise(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->max_delay > dev_priv->fmax)
		dev_priv->max_delay--;

out_unlock:
	spin_unlock(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);
/**
 * i915_gpu_lower - lower GPU frequency limit
 *
 * IPS indicates we're close to a thermal limit, so throttle back the GPU
 * frequency maximum.
 */
bool i915_gpu_lower(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->max_delay < dev_priv->min_delay)
		dev_priv->max_delay++;

out_unlock:
	spin_unlock(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);
/**
 * i915_gpu_busy - indicate GPU business to IPS
 *
 * Tell the IPS driver whether or not the GPU is busy.
 */
bool i915_gpu_busy(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = false;

	spin_lock(&mchdev_lock);
	if (!i915_mch_dev)
		goto out_unlock;
	dev_priv = i915_mch_dev;

	ret = dev_priv->busy;

out_unlock:
	spin_unlock(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);
/**
 * i915_gpu_turbo_disable - disable graphics turbo
 *
 * Disable graphics turbo by resetting the max frequency and setting the
 * current frequency to the default.
 */
bool i915_gpu_turbo_disable(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	dev_priv->max_delay = dev_priv->fstart;

	if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
		ret = false;

out_unlock:
	spin_unlock(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
/**
 * Tells the intel_ips driver that the i915 driver is now loaded, if
 * IPS got loaded first.
 *
 * This awkward dance is so that neither module has to depend on the
 * other in order for IPS to do the appropriate communication of
 * GPU turbo limits to i915.
 */
static void
ips_ping_for_i915_load(void)
{
	void (*link)(void);

	link = symbol_get(ips_link_to_i915_driver);
	if (link) {
		link();
		symbol_put(ips_link_to_i915_driver);
	}
}
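/* symbol_get() resolves to NULL when intel_ips is not loaded; when it does
 * resolve, it pins the intel_ips module until the matching symbol_put(),
 * so the callback cannot vanish mid-call.
 */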
/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv;
	int ret = 0, mmio_bar;
	uint32_t agp_size;

	/* i915 has 4 more counters */
	dev->counters += 4;
	dev->types[6] = _DRM_STAT_IRQ;
	dev->types[7] = _DRM_STAT_PRIMARY;
	dev->types[8] = _DRM_STAT_SECONDARY;
	dev->types[9] = _DRM_STAT_DMA;

	dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
	if (dev_priv == NULL)
		return -ENOMEM;

	dev->dev_private = (void *)dev_priv;
	dev_priv->dev = dev;
	dev_priv->info = (struct intel_device_info *) flags;

	if (i915_get_bridge_dev(dev)) {
		ret = -EIO;
		goto free_priv;
	}

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN2(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));

	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));

	mmio_bar = IS_GEN2(dev) ? 1 : 0;
	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
	if (!dev_priv->regs) {
		DRM_ERROR("failed to map registers\n");
		ret = -EIO;
		goto put_bridge;
	}

	dev_priv->mm.gtt = intel_gtt_get();
	if (!dev_priv->mm.gtt) {
		DRM_ERROR("Failed to initialize GTT\n");
		ret = -ENODEV;
		goto out_rmmap;
	}

	agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;

	dev_priv->mm.gtt_mapping =
		io_mapping_create_wc(dev->agp->base, agp_size);
	if (dev_priv->mm.gtt_mapping == NULL) {
		ret = -EIO;
		goto out_rmmap;
	}

	/* Set up a WC MTRR for non-PAT systems.  This is more common than
	 * one would think, because the kernel disables PAT on first
	 * generation Core chips because WC PAT gets overridden by a UC
	 * MTRR if present.  Even if a UC MTRR isn't present.
	 */
	dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
					 agp_size,
					 MTRR_TYPE_WRCOMB, 1);
	if (dev_priv->mm.gtt_mtrr < 0) {
		DRM_INFO("MTRR allocation failed.  Graphics "
			 "performance may suffer.\n");
	}

	/* The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_gem_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time: max_active = 1 and NON_REENTRANT.
	 */
	dev_priv->wq = alloc_workqueue("i915",
				       WQ_UNBOUND | WQ_NON_REENTRANT,
				       1);
	if (dev_priv->wq == NULL) {
		DRM_ERROR("Failed to create our workqueue.\n");
		ret = -ENOMEM;
		goto out_mtrrfree;
	}

	/* enable GEM by default */
	dev_priv->has_gem = 1;

	intel_irq_init(dev);

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev);
	intel_setup_gmbus(dev);
	intel_opregion_setup(dev);

	/* Make sure the bios did its job and set up vital registers */
	intel_setup_bios(dev);

	i915_gem_load(dev);

	/* Init HWS */
	if (!I915_NEED_GFX_HWS(dev)) {
		ret = i915_init_phys_hws(dev);
		if (ret)
			goto out_gem_unload;
	}

	if (IS_PINEVIEW(dev))
		i915_pineview_get_mem_freq(dev);
	else if (IS_GEN5(dev))
		i915_ironlake_get_mem_freq(dev);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyways to avoid
	 * stuck interrupts on some machines.
	 */
	if (!IS_I945G(dev) && !IS_I945GM(dev))
		pci_enable_msi(dev->pdev);

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->error_lock);
	spin_lock_init(&dev_priv->rps_lock);

	if (IS_MOBILE(dev) || !IS_GEN2(dev))
		dev_priv->num_pipe = 2;
	else
		dev_priv->num_pipe = 1;

	ret = drm_vblank_init(dev, dev_priv->num_pipe);
	if (ret)
		goto out_gem_unload;

	/* Start out suspended */
	dev_priv->mm.suspended = 1;

	intel_detect_pch(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = i915_load_modeset_init(dev);
		if (ret < 0) {
			DRM_ERROR("failed to init modeset\n");
			goto out_gem_unload;
		}
	}

	/* Must be done after probing outputs */
	intel_opregion_init(dev);
	acpi_video_register();

	setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
		    (unsigned long) dev);

	spin_lock(&mchdev_lock);
	i915_mch_dev = dev_priv;
	dev_priv->mchdev_lock = &mchdev_lock;
	spin_unlock(&mchdev_lock);

	ips_ping_for_i915_load();

	return 0;

out_gem_unload:
	if (dev_priv->mm.inactive_shrinker.shrink)
		unregister_shrinker(&dev_priv->mm.inactive_shrinker);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);
	destroy_workqueue(dev_priv->wq);
out_mtrrfree:
	if (dev_priv->mm.gtt_mtrr >= 0) {
		mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
			 dev->agp->agp_info.aper_size * 1024 * 1024);
		dev_priv->mm.gtt_mtrr = -1;
	}
	io_mapping_free(dev_priv->mm.gtt_mapping);
out_rmmap:
	pci_iounmap(dev->pdev, dev_priv->regs);
put_bridge:
	pci_dev_put(dev_priv->bridge_dev);
free_priv:
	kfree(dev_priv);
	return ret;
}
int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	spin_lock(&mchdev_lock);
	i915_mch_dev = NULL;
	spin_unlock(&mchdev_lock);

	if (dev_priv->mm.inactive_shrinker.shrink)
		unregister_shrinker(&dev_priv->mm.inactive_shrinker);

	mutex_lock(&dev->struct_mutex);
	ret = i915_gpu_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
	mutex_unlock(&dev->struct_mutex);

	/* Cancel the retire work handler, which should be idle now. */
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);

	io_mapping_free(dev_priv->mm.gtt_mapping);
	if (dev_priv->mm.gtt_mtrr >= 0) {
		mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
			 dev->agp->agp_info.aper_size * 1024 * 1024);
		dev_priv->mm.gtt_mtrr = -1;
	}

	acpi_video_unregister();

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_fbdev_fini(dev);
		intel_modeset_cleanup(dev);

		/*
		 * free the memory space allocated for the child device
		 * config parsed from VBT
		 */
		if (dev_priv->child_dev && dev_priv->child_dev_num) {
			kfree(dev_priv->child_dev);
			dev_priv->child_dev = NULL;
			dev_priv->child_dev_num = 0;
		}

		vga_switcheroo_unregister_client(dev->pdev);
		vga_client_register(dev->pdev, NULL, NULL, NULL);
	}

	/* Free error state after interrupts are fully disabled. */
	del_timer_sync(&dev_priv->hangcheck_timer);
	cancel_work_sync(&dev_priv->error_work);
	i915_destroy_error_state(dev);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_opregion_fini(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Flush any outstanding unpin_work. */
		flush_workqueue(dev_priv->wq);

		mutex_lock(&dev->struct_mutex);
		i915_gem_free_all_phys_object(dev);
		i915_gem_cleanup_ringbuffer(dev);
		mutex_unlock(&dev->struct_mutex);
		if (I915_HAS_FBC(dev) && i915_powersave)
			i915_cleanup_compression(dev);
		drm_mm_takedown(&dev_priv->mm.stolen);

		intel_cleanup_overlay(dev);

		if (!I915_NEED_GFX_HWS(dev))
			i915_free_hws(dev);
	}

	if (dev_priv->regs != NULL)
		pci_iounmap(dev->pdev, dev_priv->regs);

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);

	destroy_workqueue(dev_priv->wq);

	pci_dev_put(dev_priv->bridge_dev);
	kfree(dev->dev_private);

	return 0;
}
int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;

	DRM_DEBUG_DRIVER("\n");
	file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);

	return 0;
}
/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the AGP
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
void i915_driver_lastclose(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_fb_restore_mode(dev);
		vga_switcheroo_process_delayed_switch();
		return;
	}

	i915_gem_lastclose(dev);

	if (dev_priv->agp_heap)
		i915_mem_takedown(&(dev_priv->agp_heap));

	i915_dma_cleanup(dev);
}
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	i915_gem_release(dev, file_priv);
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		i915_mem_release(dev, file_priv, dev_priv->agp_heap);
}
void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	kfree(file_priv);
}
struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, i915_mem_alloc, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, i915_mem_free, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};

int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
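/* Entries flagged DRM_UNLOCKED run without the global DRM mutex; everything
 * else is still serialized by it.  The GEM and KMS paths above rely on
 * their own locking (struct_mutex and friends) instead.
 */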
/**
 * Determine if the device really is AGP or not.
 *
 * All Intel graphics chipsets are treated as AGP, even if they are really
 * PCI-e.
 *
 * \param dev   The device to be tested.
 *
 * \returns
 * A value of 1 is always returned to indicate every i9x5 is AGP.
 */
int i915_driver_device_is_agp(struct drm_device * dev)
{
	return 1;
}