/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "drmP.h"
#include "drm.h"
#include "drm_crtc_helper.h"
#include "drm_fb_helper.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/pci.h>
#include <linux/vgaarb.h>
#include <linux/acpi.h>
#include <linux/pnp.h>
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <acpi/video.h>
static void i915_write_hws_pga(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 addr;

	addr = dev_priv->status_page_dmah->busaddr;
	if (INTEL_INFO(dev)->gen >= 4)
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
	I915_WRITE(HWS_PGA, addr);
}
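/*
 * For illustration: HWS_PGA takes the page-aligned low 32 bits of the
 * status page address, and on gen4+ the extra physical address bits
 * 35:32 are folded into register bits 7:4.  E.g. a (hypothetical)
 * busaddr of 0x3_2345_6000 would be written as 0x23456030.
 */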
/**
 * Sets up the hardware status page for devices that need a physical address
 * in the register.
 */
static int i915_init_phys_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* Program Hardware Status Page */
	dev_priv->status_page_dmah =
		drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);

	if (!dev_priv->status_page_dmah) {
		DRM_ERROR("Can not allocate hardware status page\n");
		return -ENOMEM;
	}

	memset_io((void __force __iomem *)dev_priv->status_page_dmah->vaddr,
		  0, PAGE_SIZE);

	i915_write_hws_pga(dev);

	DRM_DEBUG_DRIVER("Enabled hardware status page\n");
	return 0;
}
/**
 * Frees the hardware status page, whether it's a physical address or a virtual
 * address set up by the X Server.
 */
static void i915_free_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	if (dev_priv->status_page_dmah) {
		drm_pci_free(dev, dev_priv->status_page_dmah);
		dev_priv->status_page_dmah = NULL;
	}

	if (ring->status_page.gfx_addr) {
		ring->status_page.gfx_addr = 0;
		drm_core_ioremapfree(&dev_priv->hws_map, dev);
	}

	/* Need to rewrite hardware status page */
	I915_WRITE(HWS_PGA, 0x1ffff000);
}
void i915_kernel_lost_context(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	/*
	 * We should never lose context on the ring with modesetting
	 * as we don't expose it to userspace
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
	ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->size;

	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
	if (ring->head == ring->tail && master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}
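/*
 * Example of the space computation above: with a 64KiB ring,
 * head == 0x100 and tail == 0x200 give space = 0x100 - 0x208 = -0x108,
 * which wraps to 0x100 - 0x208 + 0x10000 = 0xfef8 bytes free.  The
 * extra 8 bytes keep tail from ever catching up to head exactly.
 */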
static int i915_dma_cleanup(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	mutex_lock(&dev->struct_mutex);
	for (i = 0; i < I915_NUM_RINGS; i++)
		intel_cleanup_ring_buffer(&dev_priv->ring[i]);
	mutex_unlock(&dev->struct_mutex);

	/* Clear the HWS virtual address at teardown */
	if (I915_NEED_GFX_HWS(dev))
		i915_free_hws(dev);

	return 0;
}
static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret;

	master_priv->sarea = drm_getsarea(dev);
	if (master_priv->sarea) {
		master_priv->sarea_priv = (drm_i915_sarea_t *)
			((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
	} else {
		DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
	}

	if (init->ring_size != 0) {
		if (LP_RING(dev_priv)->obj != NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("Client tried to initialize ringbuffer in "
				  "GEM mode\n");
			return -EINVAL;
		}

		ret = intel_render_ring_init_dri(dev,
						 init->ring_start,
						 init->ring_size);
		if (ret) {
			i915_dma_cleanup(dev);
			return ret;
		}
	}

	dev_priv->cpp = init->cpp;
	dev_priv->back_offset = init->back_offset;
	dev_priv->front_offset = init->front_offset;
	dev_priv->current_page = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->pf_current_page = 0;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;

	return 0;
}
static int i915_dma_resume(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	DRM_DEBUG_DRIVER("%s\n", __func__);

	if (ring->map.handle == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	/* Program Hardware Status Page */
	if (!ring->status_page.page_addr) {
		DRM_ERROR("Can not find hardware status page\n");
		return -EINVAL;
	}
	DRM_DEBUG_DRIVER("hw status page @ %p\n",
			 ring->status_page.page_addr);
	if (ring->status_page.gfx_addr != 0)
		intel_ring_setup_status_page(ring);
	else
		i915_write_hws_pga(dev);

	DRM_DEBUG_DRIVER("Enabled hardware status page\n");

	return 0;
}
static int i915_dma_init(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_init_t *init = data;
	int retcode = 0;

	switch (init->func) {
	case I915_INIT_DMA:
		retcode = i915_initialize(dev, init);
		break;
	case I915_CLEANUP_DMA:
		retcode = i915_dma_cleanup(dev);
		break;
	case I915_RESUME_DMA:
		retcode = i915_dma_resume(dev);
		break;
	default:
		retcode = -EINVAL;
		break;
	}

	return retcode;
}
/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
static int validate_cmd(int cmd)
{
	switch (((cmd >> 29) & 0x7)) {
	case 0x0:
		switch ((cmd >> 23) & 0x3f) {
		case 0x0:
			return 1;	/* MI_NOOP */
		case 0x04:
			return 1;	/* MI_FLUSH */
		default:
			return 0;	/* disallow everything else */
		}
		break;
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3:
		if (((cmd >> 24) & 0x1f) <= 0x18)
			return 1;

		switch ((cmd >> 24) & 0x1f) {
		case 0x1c:
			return 1;
		case 0x1d:
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			if (cmd & (1 << 23))
				return (cmd & 0xffff) + 1;
			else
				return 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			else if (cmd & (1 << 17))	/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				else
					return (((cmd & 0xffff) + 1) / 2) + 1;
			else
				return 2;	/* indirect sequential */
		default:
			return 0;
		}
	default:
		return 0;
	}

	return 0;
}
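/*
 * A rough worked example of the sizing above: a 2D command header such
 * as 0x54000004 has (cmd >> 29) == 0x2, so it is sized as
 * (cmd & 0xff) + 2 = 6 dwords, telling the validation loop in
 * i915_emit_cmds() where the next instruction to check begins.
 */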
static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, ret;

	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
		return -EINVAL;

	for (i = 0; i < dwords;) {
		int sz = validate_cmd(buffer[i]);
		if (sz == 0 || i + sz > dwords)
			return -EINVAL;
		i += sz;
	}

	ret = BEGIN_LP_RING((dwords+1)&~1);
	if (ret)
		return ret;

	for (i = 0; i < dwords; i++)
		OUT_RING(buffer[i]);
	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}
int
i915_emit_box(struct drm_device *dev,
	      struct drm_clip_rect *box,
	      int DR1, int DR4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
	    box->y2 <= 0 || box->x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box->x1, box->y1, box->x2, box->y2);
		return -EINVAL;
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		ret = BEGIN_LP_RING(4);
		if (ret)
			return ret;

		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
	} else {
		ret = BEGIN_LP_RING(6);
		if (ret)
			return ret;

		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
	}
	ADVANCE_LP_RING();

	return 0;
}
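/*
 * The drawing-rectangle coordinates above are packed y << 16 | x, with
 * the second dword holding the inclusive bottom-right corner.  E.g. a
 * 1024x768 cliprect at the origin emits 0x00000000 followed by
 * (767 << 16) | 1023 = 0x02ff03ff.
 */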
/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit. For now, do it in both places:
 */

static void i915_emit_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	dev_priv->counter++;
	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->counter;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}
}
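/*
 * The breadcrumb is a monotonically increasing sequence number that
 * MI_STORE_DWORD_INDEX writes into the hardware status page at dword
 * I915_BREADCRUMB_INDEX; READ_BREADCRUMB() simply reads that dword
 * back, which is how last_dispatch is reported to userspace below.
 */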
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
				   drm_i915_cmdbuffer_t *cmd,
				   struct drm_clip_rect *cliprects,
				   void *cmdbuf)
{
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, &cliprects[i],
					    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb(dev);
	return 0;
}
static int i915_dispatch_batchbuffer(struct drm_device * dev,
				     drm_i915_batchbuffer_t * batch,
				     struct drm_clip_rect *cliprects)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int nbox = batch->num_cliprects;
	int i, count, ret;

	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;
	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, &cliprects[i],
					    batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

		if (!IS_I830(dev) && !IS_845G(dev)) {
			ret = BEGIN_LP_RING(2);
			if (ret)
				return ret;

			if (INTEL_INFO(dev)->gen >= 4) {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
				OUT_RING(batch->start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			}
		} else {
			ret = BEGIN_LP_RING(4);
			if (ret)
				return ret;

			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
		}
		ADVANCE_LP_RING();
	}

	if (IS_G4X(dev) || IS_GEN5(dev)) {
		if (BEGIN_LP_RING(2) == 0) {
			OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
			OUT_RING(MI_NOOP);
			ADVANCE_LP_RING();
		}
	}

	i915_emit_breadcrumb(dev);
	return 0;
}
static int i915_dispatch_flip(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv =
		dev->primary->master->driver_priv;
	int ret;

	if (!master_priv->sarea_priv)
		return -EINVAL;

	DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
			 __func__,
			 dev_priv->current_page,
			 master_priv->sarea_priv->pf_current_page);

	i915_kernel_lost_context(dev);

	ret = BEGIN_LP_RING(10);
	if (ret)
		return ret;

	OUT_RING(MI_FLUSH | MI_READ_FLUSH);
	OUT_RING(0);

	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
	if (dev_priv->current_page == 0) {
		OUT_RING(dev_priv->back_offset);
		dev_priv->current_page = 1;
	} else {
		OUT_RING(dev_priv->front_offset);
		dev_priv->current_page = 0;
	}
	OUT_RING(0);

	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);

	ADVANCE_LP_RING();

	master_priv->sarea_priv->last_enqueue = dev_priv->counter++;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
	return 0;
}
static int i915_quiescent(struct drm_device *dev)
{
	struct intel_ring_buffer *ring = LP_RING(dev->dev_private);

	i915_kernel_lost_context(dev);
	return intel_wait_ring_idle(ring);
}
static int i915_flush_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	int ret;

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	ret = i915_quiescent(dev);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
static int i915_batchbuffer(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_batchbuffer_t *batch = data;
	int ret;
	struct drm_clip_rect *cliprects = NULL;

	if (!dev_priv->allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return -EINVAL;
	}

	DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
			 batch->start, batch->used, batch->num_cliprects);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (batch->num_cliprects < 0)
		return -EINVAL;

	if (batch->num_cliprects) {
		cliprects = kcalloc(batch->num_cliprects,
				    sizeof(struct drm_clip_rect),
				    GFP_KERNEL);
		if (cliprects == NULL)
			return -ENOMEM;

		ret = copy_from_user(cliprects, batch->cliprects,
				     batch->num_cliprects *
				     sizeof(struct drm_clip_rect));
		if (ret != 0) {
			ret = -EFAULT;
			goto fail_free;
		}
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
	mutex_unlock(&dev->struct_mutex);

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_free:
	kfree(cliprects);

	return ret;
}
static int i915_cmdbuffer(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_cmdbuffer_t *cmdbuf = data;
	struct drm_clip_rect *cliprects = NULL;
	void *batch_data;
	int ret;

	DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
			 cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (cmdbuf->num_cliprects < 0)
		return -EINVAL;

	batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
	if (batch_data == NULL)
		return -ENOMEM;

	ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
	if (ret != 0) {
		ret = -EFAULT;
		goto fail_batch_free;
	}

	if (cmdbuf->num_cliprects) {
		cliprects = kcalloc(cmdbuf->num_cliprects,
				    sizeof(struct drm_clip_rect), GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto fail_batch_free;
		}

		ret = copy_from_user(cliprects, cmdbuf->cliprects,
				     cmdbuf->num_cliprects *
				     sizeof(struct drm_clip_rect));
		if (ret != 0) {
			ret = -EFAULT;
			goto fail_clip_free;
		}
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
	mutex_unlock(&dev->struct_mutex);
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
		goto fail_clip_free;
	}

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_clip_free:
	kfree(cliprects);
fail_batch_free:
	kfree(batch_data);

	return ret;
}
static int i915_flip_bufs(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	int ret;

	DRM_DEBUG_DRIVER("%s\n", __func__);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_flip(dev);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
static int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_getparam_t *param = data;
	int value;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
		value = dev->pdev->irq ? 1 : 0;
		break;
	case I915_PARAM_ALLOW_BATCHBUFFER:
		value = dev_priv->allow_batchbuffer ? 1 : 0;
		break;
	case I915_PARAM_LAST_DISPATCH:
		value = READ_BREADCRUMB(dev_priv);
		break;
	case I915_PARAM_CHIPSET_ID:
		value = dev->pci_device;
		break;
	case I915_PARAM_HAS_GEM:
		value = dev_priv->has_gem;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_PAGEFLIPPING:
		value = 1;
		break;
	case I915_PARAM_HAS_EXECBUF2:
		/* depends on GEM */
		value = dev_priv->has_gem;
		break;
	case I915_PARAM_HAS_BSD:
		value = HAS_BSD(dev);
		break;
	case I915_PARAM_HAS_BLT:
		value = HAS_BLT(dev);
		break;
	case I915_PARAM_HAS_RELAXED_FENCING:
		value = 1;
		break;
	case I915_PARAM_HAS_COHERENT_RINGS:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		value = INTEL_INFO(dev)->gen >= 4;
		break;
	case I915_PARAM_HAS_RELAXED_DELTA:
		value = 1;
		break;
	case I915_PARAM_HAS_GEN7_SOL_RESET:
		value = 1;
		break;
	default:
		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
				 param->param);
		return -EINVAL;
	}

	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
		DRM_ERROR("DRM_COPY_TO_USER failed\n");
		return -EFAULT;
	}

	return 0;
}
static int i915_setparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_setparam_t *param = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
		break;
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
		dev_priv->tex_lru_log_granularity = param->value;
		break;
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		dev_priv->allow_batchbuffer = param->value;
		break;
	case I915_SETPARAM_NUM_USED_FENCES:
		if (param->value > dev_priv->num_fence_regs ||
		    param->value < 0)
			return -EINVAL;
		/* Userspace can use first N regs */
		dev_priv->fence_reg_start = param->value;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown parameter %d\n",
				 param->param);
		return -EINVAL;
	}

	return 0;
}
static int i915_set_status_page(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_hws_addr_t *hws = data;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	if (!I915_NEED_GFX_HWS(dev))
		return -EINVAL;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		WARN(1, "tried to set status page when mode setting active\n");
		return 0;
	}

	DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);

	ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);

	dev_priv->hws_map.offset = dev->agp->base + hws->addr;
	dev_priv->hws_map.size = 4*1024;
	dev_priv->hws_map.type = 0;
	dev_priv->hws_map.flags = 0;
	dev_priv->hws_map.mtrr = 0;

	drm_core_ioremap_wc(&dev_priv->hws_map, dev);
	if (dev_priv->hws_map.handle == NULL) {
		i915_dma_cleanup(dev);
		ring->status_page.gfx_addr = 0;
		DRM_ERROR("can not ioremap virtual address for"
			  " G33 hw status page\n");
		return -ENOMEM;
	}
	ring->status_page.page_addr =
		(void __force __iomem *)dev_priv->hws_map.handle;
	memset_io(ring->status_page.page_addr, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);

	DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
			 ring->status_page.gfx_addr);
	DRM_DEBUG_DRIVER("load hws at %p\n",
			 ring->status_page.page_addr);
	return 0;
}
static int i915_get_bridge_dev(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}
#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

#define DEVEN_REG 0x54
#define DEVEN_MCHBAR_EN (1 << 28)
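/*
 * Note on the registers above: the MCHBAR base lives in the bridge
 * device's config space (0x44 on i915-class parts, 0x48 and 64 bits
 * wide on gen4+), with bit 0 acting as the enable.  On 915G/915GM the
 * enable is instead bit 28 of the DEVEN register at 0x54, which is why
 * the setup/teardown paths below special-case those chipsets.
 */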
/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	if (INTEL_INFO(dev)->gen >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		return ret;
	}

	if (INTEL_INFO(dev)->gen >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}
/* Setup MCHBAR if possible, return true if we should disable it again */
static void
intel_setup_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}
static void
intel_teardown_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev) || IS_I915GM(dev)) {
			pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
			temp &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
		} else {
			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
			temp &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}
#define PTE_ADDRESS_MASK		0xfffff000
#define PTE_ADDRESS_MASK_HIGH		0x000000f0 /* i915+ */
#define PTE_MAPPING_TYPE_UNCACHED	(0 << 1)
#define PTE_MAPPING_TYPE_DCACHE		(1 << 1) /* i830 only */
#define PTE_MAPPING_TYPE_CACHED		(3 << 1)
#define PTE_MAPPING_TYPE_MASK		(3 << 1)
#define PTE_VALID			(1 << 0)
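/*
 * A rough sketch of how a GTT PTE would be assembled from these fields
 * for a (hypothetical) cacheable page at physical address 0x1_2345_6000:
 *
 *	(0x23456000 & PTE_ADDRESS_MASK) |
 *	((0x123456000 >> 28) & PTE_ADDRESS_MASK_HIGH) |
 *	PTE_MAPPING_TYPE_CACHED | PTE_VALID
 *
 * i.e. the extra physical address bits 35:32 ride in bits 7:4, just as
 * in the HWS_PGA packing earlier in this file.
 */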
/**
 * i915_stolen_to_phys - take an offset into stolen memory and turn it into
 *                       a physical address
 * @dev: drm device
 * @offset: address to translate
 *
 * Some chip functions require allocations from stolen space and need the
 * physical address of the memory in question.
 */
static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct pci_dev *pdev = dev_priv->bridge_dev;
	u32 base;

#if 0
	/* On the machines I have tested the Graphics Base of Stolen Memory
	 * is unreliable, so compute the base by subtracting the stolen memory
	 * from the Top of Low Usable DRAM which is where the BIOS places
	 * the graphics stolen memory.
	 */
	if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
		/* top 32bits are reserved = 0 */
		pci_read_config_dword(pdev, 0xA4, &base);
	} else {
		/* XXX presume 8xx is the same as i915 */
		pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
	}
#else
	if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
		u16 val;
		pci_read_config_word(pdev, 0xb0, &val);
		base = val >> 4 << 20;
	} else {
		u8 val;
		pci_read_config_byte(pdev, 0x9c, &val);
		base = val >> 3 << 27;
	}
	base -= dev_priv->mm.gtt->stolen_size;
#endif

	return base + offset;
}
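/*
 * Worked example of the top-of-memory math above, assuming a machine
 * with 2GiB of low usable DRAM and 8MiB of stolen memory: the config
 * word at 0xb0 reads 0x8000, so base = (0x8000 >> 4) << 20 =
 * 0x80000000, and after subtracting stolen_size the stolen area is
 * taken to start at 0x7f800000.
 */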
static void i915_warn_stolen(struct drm_device *dev)
{
	DRM_ERROR("not enough stolen space for compressed buffer, disabling\n");
	DRM_ERROR("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
}
static void i915_setup_compression(struct drm_device *dev, int size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
	unsigned long cfb_base;
	unsigned long ll_base = 0;

	/* Just in case the BIOS is doing something questionable. */
	intel_disable_fbc(dev);

	compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
	if (compressed_fb)
		compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
	if (!compressed_fb)
		goto err;

	cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
	if (!cfb_base)
		goto err_fb;

	if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) {
		compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
						    4096, 4096, 0);
		if (compressed_llb)
			compressed_llb = drm_mm_get_block(compressed_llb,
							  4096, 4096);
		if (!compressed_llb)
			goto err_fb;

		ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
		if (!ll_base)
			goto err_llb;
	}

	dev_priv->cfb_size = size;

	dev_priv->compressed_fb = compressed_fb;
	if (HAS_PCH_SPLIT(dev))
		I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
	else if (IS_GM45(dev)) {
		I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
	} else {
		I915_WRITE(FBC_CFB_BASE, cfb_base);
		I915_WRITE(FBC_LL_BASE, ll_base);
		dev_priv->compressed_llb = compressed_llb;
	}

	DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
		      cfb_base, ll_base, size >> 20);
	return;

err_llb:
	drm_mm_put_block(compressed_llb);
err_fb:
	drm_mm_put_block(compressed_fb);
err:
	dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
	i915_warn_stolen(dev);
}
static void i915_cleanup_compression(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	drm_mm_put_block(dev_priv->compressed_fb);
	if (dev_priv->compressed_llb)
		drm_mm_put_block(dev_priv->compressed_llb);
}
/* true = enable decode, false = disable decoder */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_device *dev = cookie;

	intel_modeset_vga_set_state(dev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };

	if (state == VGA_SWITCHEROO_ON) {
		printk(KERN_INFO "i915: switched on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(dev->pdev, PCI_D0);
		i915_resume(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		printk(KERN_ERR "i915: switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		i915_suspend(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}
static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	bool can_switch;

	spin_lock(&dev->count_lock);
	can_switch = (dev->open_count == 0);
	spin_unlock(&dev->count_lock);
	return can_switch;
}
static int i915_load_gem_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long prealloc_size, gtt_size, mappable_size;
	int ret;

	prealloc_size = dev_priv->mm.gtt->stolen_size;
	gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
	mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;

	/* Basic memrange allocator for stolen space */
	drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);

	/* Let GEM Manage all of the aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently
	 * prefetches past the end of the object, and we've seen multiple
	 * hangs with the GPU head pointer stuck in a batchbuffer bound
	 * at the last page of the aperture.  One page should be enough to
	 * keep any prefetching inside of the aperture.
	 */
	i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_init_ringbuffer(dev);
	mutex_unlock(&dev->struct_mutex);
	if (ret)
		return ret;

	/* Try to set up FBC with a reasonable compressed buffer size */
	if (I915_HAS_FBC(dev) && i915_powersave) {
		int cfb_size;

		/* Leave 1M for line length buffer & misc. */

		/* Try to get a 32M buffer... */
		if (prealloc_size > (36*1024*1024))
			cfb_size = 32*1024*1024;
		else /* fall back to 7/8 of the stolen space */
			cfb_size = prealloc_size * 7 / 8;
		i915_setup_compression(dev, cfb_size);
	}

	/* Allow hardware batchbuffers unless told otherwise. */
	dev_priv->allow_batchbuffer = 1;
	return 0;
}
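/*
 * Example of the compressed buffer sizing above: with 64MiB of stolen
 * memory the driver takes the 32MiB fast path, while with 16MiB it
 * falls back to 7/8 of the pool, i.e. a 14MiB compressed buffer.
 */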
static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_parse_bios(dev);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

	/* If we have > 1 VGA cards, then we need to arbitrate access
	 * to the common VGA resources.
	 *
	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
	 * then we do not take part in VGA arbitration and the
	 * vga_client_register() fails with -ENODEV.
	 */
	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
	if (ret && ret != -ENODEV)
		goto out;

	intel_register_dsm_handler();

	ret = vga_switcheroo_register_client(dev->pdev,
					     i915_switcheroo_set_state,
					     NULL,
					     i915_switcheroo_can_switch);
	if (ret)
		goto cleanup_vga_client;

	/* IIR "flip pending" bit means done if this bit is set */
	if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE))
		dev_priv->flip_pending_is_done = true;

	intel_modeset_init(dev);

	ret = i915_load_gem_init(dev);
	if (ret)
		goto cleanup_vga_switcheroo;

	intel_modeset_gem_init(dev);

	ret = drm_irq_install(dev);
	if (ret)
		goto cleanup_gem;

	/* Always safe in the mode setting case. */
	/* FIXME: do pre/post-mode set stuff in core KMS code */
	dev->vblank_disable_allowed = 1;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_irq;

	drm_kms_helper_poll_init(dev);

	/* We're off and running w/KMS */
	dev_priv->mm.suspended = 0;

	return 0;

cleanup_irq:
	drm_irq_uninstall(dev);
cleanup_gem:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	mutex_unlock(&dev->struct_mutex);
cleanup_vga_switcheroo:
	vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
	vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
	return ret;
}
int i915_master_create(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv;

	master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
	if (!master_priv)
		return -ENOMEM;

	master->driver_priv = master_priv;
	return 0;
}

void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv = master->driver_priv;

	if (!master_priv)
		return;

	kfree(master_priv);

	master->driver_priv = NULL;
}
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->c_m = 1;
	} else {
		dev_priv->c_m = 2;
	}
}
static const struct cparams {
	u16 i;
	u16 t;
	u16 m;
	u16 c;
} cparams[] = {
	{ 1, 1333, 301, 28664 },
	{ 1, 1066, 294, 24460 },
	{ 1, 800, 294, 25192 },
	{ 0, 1333, 276, 27605 },
	{ 0, 1066, 276, 27605 },
	{ 0, 800, 231, 23784 },
};
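/*
 * i915_chipset_val() below picks (m, c) from this table by matching
 * dev_priv->c_m and dev_priv->r_t, then estimates chipset power
 * roughly as (m * counts-per-ms + c) / 10; e.g. the first row gives
 * (301 * diff + 28664) / 10 for c_m == 1 at a 1333 memory frequency.
 */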
unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
	u64 total_count, diff, ret;
	u32 count1, count2, count3, m = 0, c = 0;
	unsigned long now = jiffies_to_msecs(jiffies), diff1;
	int i;

	diff1 = now - dev_priv->last_time1;

	/* Prevent division-by-zero if we are asking too fast.
	 * Also, we don't get interesting results if we are polling
	 * faster than once in 10ms, so just return the saved value
	 * in such cases.
	 */
	if (diff1 <= 10)
		return dev_priv->chipset_power;

	count1 = I915_READ(DMIEC);
	count2 = I915_READ(DDREC);
	count3 = I915_READ(CSIEC);

	total_count = count1 + count2 + count3;

	/* FIXME: handle per-counter overflow */
	if (total_count < dev_priv->last_count1) {
		diff = ~0UL - dev_priv->last_count1;
		diff += total_count;
	} else {
		diff = total_count - dev_priv->last_count1;
	}

	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
		if (cparams[i].i == dev_priv->c_m &&
		    cparams[i].t == dev_priv->r_t) {
			m = cparams[i].m;
			c = cparams[i].c;
			break;
		}
	}

	diff = div_u64(diff, diff1);
	ret = ((m * diff) + c);
	ret = div_u64(ret, 10);

	dev_priv->last_count1 = total_count;
	dev_priv->last_time1 = now;

	dev_priv->chipset_power = ret;

	return ret;
}
unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
{
	unsigned long m, x, b;
	u32 tsfs;

	tsfs = I915_READ(TSFS);

	m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
	x = I915_READ8(TR1);

	b = tsfs & TSFS_INTR_MASK;

	return ((m * x) / 127) - b;
}
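/*
 * TSFS thus encodes a simple linear model: the slope field scaled by
 * the TR1 thermal reading over 127, minus the intercept field.  E.g.
 * (hypothetically) m == 100, x == 64 and b == 20 would give
 * (100 * 64) / 127 - 20, roughly 30, which i915_gfx_val() below treats
 * as a temperature reading.
 */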
static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
	static const struct v_table {
		u16 vd; /* in .1 mil */
		u16 vm; /* in .1 mil */
	} v_table[] = {
		/* ... voltage table entries ... */
	};

	if (dev_priv->info->is_mobile)
		return v_table[pxvid].vm;
	else
		return v_table[pxvid].vd;
}
void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	struct timespec now, diff1;
	u64 diff;
	unsigned long diffms;
	u32 count;

	getrawmonotonic(&now);
	diff1 = timespec_sub(now, dev_priv->last_time2);

	/* Don't divide by 0 */
	diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
	if (!diffms)
		return;

	count = I915_READ(GFXEC);

	if (count < dev_priv->last_count2) {
		diff = ~0UL - dev_priv->last_count2;
		diff += count;
	} else {
		diff = count - dev_priv->last_count2;
	}

	dev_priv->last_count2 = count;
	dev_priv->last_time2 = now;

	/* More magic constants... */
	diff = diff * 1181;
	diff = div_u64(diff, diffms * 10);
	dev_priv->gfx_power = diff;
}
unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
	unsigned long t, corr, state1, corr2, state2;
	u32 pxvid, ext_v;

	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
	pxvid = (pxvid >> 24) & 0x7f;
	ext_v = pvid_to_extvid(dev_priv, pxvid);

	state1 = ext_v;

	t = i915_mch_val(dev_priv);

	/* Revel in the empirically derived constants */

	/* Correction factor in 1/100000 units */
	if (t > 80)
		corr = ((t * 2349) + 135940);
	else if (t >= 50)
		corr = ((t * 964) + 29317);
	else /* < 50 */
		corr = ((t * 301) + 1004);

	corr = corr * ((150142 * state1) / 10000 - 78642);
	corr /= 100000;
	corr2 = (corr * dev_priv->corr);

	state2 = (corr2 * state1) / 10000;
	state2 /= 100; /* convert to mW */

	i915_update_gfx_val(dev_priv);

	return dev_priv->gfx_power + state2;
}
/* Global for IPS driver to get at the current i915 device */
static struct drm_i915_private *i915_mch_dev;
/*
 * Lock protecting IPS related data structures
 *   - i915_mch_dev
 *   - dev_priv->max_delay
 *   - dev_priv->min_delay
 *   - dev_priv->fmax
 *   - dev_priv->gpu_busy
 */
static DEFINE_SPINLOCK(mchdev_lock);
/**
 * i915_read_mch_val - return value for IPS use
 *
 * Calculate and return a value for the IPS driver to use when deciding whether
 * we have thermal and power headroom to increase CPU or GPU power budget.
 */
unsigned long i915_read_mch_val(void)
{
	struct drm_i915_private *dev_priv;
	unsigned long chipset_val, graphics_val, ret = 0;

	spin_lock(&mchdev_lock);
	if (!i915_mch_dev)
		goto out_unlock;
	dev_priv = i915_mch_dev;

	chipset_val = i915_chipset_val(dev_priv);
	graphics_val = i915_gfx_val(dev_priv);

	ret = chipset_val + graphics_val;

out_unlock:
	spin_unlock(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);
/**
 * i915_gpu_raise - raise GPU frequency limit
 *
 * Raise the limit; IPS indicates we have thermal headroom.
 */
bool i915_gpu_raise(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->max_delay > dev_priv->fmax)
		dev_priv->max_delay--;

out_unlock:
	spin_unlock(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);
/**
 * i915_gpu_lower - lower GPU frequency limit
 *
 * IPS indicates we're close to a thermal limit, so throttle back the GPU
 * frequency maximum.
 */
bool i915_gpu_lower(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->max_delay < dev_priv->min_delay)
		dev_priv->max_delay++;

out_unlock:
	spin_unlock(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);
/**
 * i915_gpu_busy - indicate GPU business to IPS
 *
 * Tell the IPS driver whether or not the GPU is busy.
 */
bool i915_gpu_busy(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = false;

	spin_lock(&mchdev_lock);
	if (!i915_mch_dev)
		goto out_unlock;
	dev_priv = i915_mch_dev;

	ret = dev_priv->busy;

out_unlock:
	spin_unlock(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);
/**
 * i915_gpu_turbo_disable - disable graphics turbo
 *
 * Disable graphics turbo by resetting the max frequency and setting the
 * current frequency to the default.
 */
bool i915_gpu_turbo_disable(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	dev_priv->max_delay = dev_priv->fstart;

	if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
		ret = false;

out_unlock:
	spin_unlock(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
/**
 * Tells the intel_ips driver that the i915 driver is now loaded, if
 * IPS got loaded first.
 *
 * This awkward dance is so that neither module has to depend on the
 * other in order for IPS to do the appropriate communication of
 * GPU turbo limits to i915.
 */
static void
ips_ping_for_i915_load(void)
{
	void (*link)(void);

	link = symbol_get(ips_link_to_i915_driver);
	if (link) {
		link();
		symbol_put(ips_link_to_i915_driver);
	}
}
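/*
 * A sketch of the other side of this handshake, as intel_ips is
 * expected to provide it: the IPS driver exports the symbol, e.g.
 *
 *	void ips_link_to_i915_driver(void) { ... }
 *	EXPORT_SYMBOL_GPL(ips_link_to_i915_driver);
 *
 * and symbol_get() above resolves it only if intel_ips is already
 * loaded, so neither module needs a hard link-time dependency on the
 * other.
 */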
/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv;
	int ret = 0, mmio_bar;
	uint32_t agp_size;

	/* i915 has 4 more counters */
	dev->counters += 4;
	dev->types[6] = _DRM_STAT_IRQ;
	dev->types[7] = _DRM_STAT_PRIMARY;
	dev->types[8] = _DRM_STAT_SECONDARY;
	dev->types[9] = _DRM_STAT_DMA;

	dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
	if (dev_priv == NULL)
		return -ENOMEM;

	dev->dev_private = (void *)dev_priv;
	dev_priv->dev = dev;
	dev_priv->info = (struct intel_device_info *) flags;

	if (i915_get_bridge_dev(dev)) {
		ret = -EIO;
		goto free_priv;
	}

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN2(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));

	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));

	mmio_bar = IS_GEN2(dev) ? 1 : 0;
	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
	if (!dev_priv->regs) {
		DRM_ERROR("failed to map registers\n");
		ret = -EIO;
		goto put_bridge;
	}

	dev_priv->mm.gtt = intel_gtt_get();
	if (!dev_priv->mm.gtt) {
		DRM_ERROR("Failed to initialize GTT\n");
		ret = -ENODEV;
		goto out_rmmap;
	}

	agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;

	dev_priv->mm.gtt_mapping =
		io_mapping_create_wc(dev->agp->base, agp_size);
	if (dev_priv->mm.gtt_mapping == NULL) {
		ret = -EIO;
		goto out_rmmap;
	}

	/* Set up a WC MTRR for non-PAT systems.  This is more common than
	 * one would think, because the kernel disables PAT on first
	 * generation Core chips because WC PAT gets overridden by a UC
	 * MTRR if present.  Even if a UC MTRR isn't present.
	 */
	dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
					 agp_size,
					 MTRR_TYPE_WRCOMB, 1);
	if (dev_priv->mm.gtt_mtrr < 0) {
		DRM_INFO("MTRR allocation failed.  Graphics "
			 "performance may suffer.\n");
	}

	/* The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_gem_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time: max_active = 1 and NON_REENTRANT.
	 */
	dev_priv->wq = alloc_workqueue("i915",
				       WQ_UNBOUND | WQ_NON_REENTRANT,
				       1);
	if (dev_priv->wq == NULL) {
		DRM_ERROR("Failed to create our workqueue.\n");
		ret = -ENOMEM;
		goto out_mtrrfree;
	}

	/* enable GEM by default */
	dev_priv->has_gem = 1;

	intel_irq_init(dev);

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev);
	intel_setup_gmbus(dev);
	intel_opregion_setup(dev);

	/* Make sure the bios did its job and set up vital registers */
	intel_setup_bios(dev);

	i915_gem_load(dev);

	/* Init HWS */
	if (!I915_NEED_GFX_HWS(dev)) {
		ret = i915_init_phys_hws(dev);
		if (ret)
			goto out_gem_unload;
	}

	if (IS_PINEVIEW(dev))
		i915_pineview_get_mem_freq(dev);
	else if (IS_GEN5(dev))
		i915_ironlake_get_mem_freq(dev);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyways to avoid
	 * stuck interrupts on some machines.
	 */
	if (!IS_I945G(dev) && !IS_I945GM(dev))
		pci_enable_msi(dev->pdev);

	spin_lock_init(&dev_priv->gt_lock);
	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->error_lock);
	spin_lock_init(&dev_priv->rps_lock);

	if (IS_IVYBRIDGE(dev))
		dev_priv->num_pipe = 3;
	else if (IS_MOBILE(dev) || !IS_GEN2(dev))
		dev_priv->num_pipe = 2;
	else
		dev_priv->num_pipe = 1;

	ret = drm_vblank_init(dev, dev_priv->num_pipe);
	if (ret)
		goto out_gem_unload;

	/* Start out suspended */
	dev_priv->mm.suspended = 1;

	intel_detect_pch(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = i915_load_modeset_init(dev);
		if (ret < 0) {
			DRM_ERROR("failed to init modeset\n");
			goto out_gem_unload;
		}
	}

	/* Must be done after probing outputs */
	intel_opregion_init(dev);
	acpi_video_register();

	setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
		    (unsigned long) dev);

	spin_lock(&mchdev_lock);
	i915_mch_dev = dev_priv;
	dev_priv->mchdev_lock = &mchdev_lock;
	spin_unlock(&mchdev_lock);

	ips_ping_for_i915_load();

	return 0;

out_gem_unload:
	if (dev_priv->mm.inactive_shrinker.shrink)
		unregister_shrinker(&dev_priv->mm.inactive_shrinker);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);
	destroy_workqueue(dev_priv->wq);
out_mtrrfree:
	if (dev_priv->mm.gtt_mtrr >= 0) {
		mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
			 dev->agp->agp_info.aper_size * 1024 * 1024);
		dev_priv->mm.gtt_mtrr = -1;
	}
	io_mapping_free(dev_priv->mm.gtt_mapping);
out_rmmap:
	pci_iounmap(dev->pdev, dev_priv->regs);
put_bridge:
	pci_dev_put(dev_priv->bridge_dev);
free_priv:
	kfree(dev_priv);
	return ret;
}
int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	spin_lock(&mchdev_lock);
	i915_mch_dev = NULL;
	spin_unlock(&mchdev_lock);

	if (dev_priv->mm.inactive_shrinker.shrink)
		unregister_shrinker(&dev_priv->mm.inactive_shrinker);

	mutex_lock(&dev->struct_mutex);
	ret = i915_gpu_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
	mutex_unlock(&dev->struct_mutex);

	/* Cancel the retire work handler, which should be idle now. */
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);

	io_mapping_free(dev_priv->mm.gtt_mapping);
	if (dev_priv->mm.gtt_mtrr >= 0) {
		mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
			 dev->agp->agp_info.aper_size * 1024 * 1024);
		dev_priv->mm.gtt_mtrr = -1;
	}

	acpi_video_unregister();

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_fbdev_fini(dev);
		intel_modeset_cleanup(dev);

		/*
		 * free the memory space allocated for the child device
		 * config parsed from VBT
		 */
		if (dev_priv->child_dev && dev_priv->child_dev_num) {
			kfree(dev_priv->child_dev);
			dev_priv->child_dev = NULL;
			dev_priv->child_dev_num = 0;
		}

		vga_switcheroo_unregister_client(dev->pdev);
		vga_client_register(dev->pdev, NULL, NULL, NULL);
	}

	/* Free error state after interrupts are fully disabled. */
	del_timer_sync(&dev_priv->hangcheck_timer);
	cancel_work_sync(&dev_priv->error_work);
	i915_destroy_error_state(dev);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_opregion_fini(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Flush any outstanding unpin_work. */
		flush_workqueue(dev_priv->wq);

		mutex_lock(&dev->struct_mutex);
		i915_gem_free_all_phys_object(dev);
		i915_gem_cleanup_ringbuffer(dev);
		mutex_unlock(&dev->struct_mutex);
		if (I915_HAS_FBC(dev) && i915_powersave)
			i915_cleanup_compression(dev);
		drm_mm_takedown(&dev_priv->mm.stolen);

		intel_cleanup_overlay(dev);

		if (!I915_NEED_GFX_HWS(dev))
			i915_free_hws(dev);
	}

	if (dev_priv->regs != NULL)
		pci_iounmap(dev->pdev, dev_priv->regs);

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);

	destroy_workqueue(dev_priv->wq);

	pci_dev_put(dev_priv->bridge_dev);
	kfree(dev->dev_private);

	return 0;
}
int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;

	DRM_DEBUG_DRIVER("\n");
	file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);

	return 0;
}
/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the AGP
 * and DMA structures, since the kernel won't be using them, and clean
 * up any environment left dangling.
 */
void i915_driver_lastclose(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_fb_restore_mode(dev);
		vga_switcheroo_process_delayed_switch();
		return;
	}

	i915_gem_lastclose(dev);

	if (dev_priv->agp_heap)
		i915_mem_takedown(&(dev_priv->agp_heap));

	i915_dma_cleanup(dev);
}
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	i915_gem_release(dev, file_priv);
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		i915_mem_release(dev, file_priv, dev_priv->agp_heap);
}
void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	kfree(file_priv);
}
struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, i915_mem_alloc, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, i915_mem_free, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};

int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
/**
 * Determine if the device really is AGP or not.
 *
 * All Intel graphics chipsets are treated as AGP, even if they are really
 * PCI-e.
 *
 * \param dev   The device to be tested.
 *
 * \returns
 * A value of 1 is always returned to indicate every i9x5 is AGP.
 */
int i915_driver_device_is_agp(struct drm_device * dev)
{
	return 1;
}