/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/device.h>
#include "i915_trace.h"
#include "intel_drv.h"

#include <linux/console.h>
#include <linux/module.h>
#include "drm_crtc_helper.h"

static int i915_modeset __read_mostly = -1;
module_param_named(modeset, i915_modeset, int, 0400);
MODULE_PARM_DESC(modeset,
		"Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
		"1=on, -1=force vga console preference [default])");
unsigned int i915_fbpercrtc __always_unused = 0;
module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);

int i915_panel_ignore_lid __read_mostly = 0;
module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
MODULE_PARM_DESC(panel_ignore_lid,
		"Override lid status (0=autodetect [default], 1=lid open, "

unsigned int i915_powersave __read_mostly = 1;
module_param_named(powersave, i915_powersave, int, 0600);
MODULE_PARM_DESC(powersave,
		"Enable powersavings, fbc, downclocking, etc. (default: true)");

int i915_semaphores __read_mostly = -1;
module_param_named(semaphores, i915_semaphores, int, 0600);
MODULE_PARM_DESC(semaphores,
		"Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");

int i915_enable_rc6 __read_mostly = -1;
module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0400);
MODULE_PARM_DESC(i915_enable_rc6,
		"Enable power-saving render C-state 6. "
		"Different stages can be selected via bitmask values "
		"(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). "
		"For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
		"default: -1 (use per-chip default)");
int i915_enable_fbc __read_mostly = -1;
module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
MODULE_PARM_DESC(i915_enable_fbc,
		"Enable frame buffer compression for power savings "
		"(default: -1 (use per-chip default))");

unsigned int i915_lvds_downclock __read_mostly = 0;
module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
MODULE_PARM_DESC(lvds_downclock,
		"Use panel (LVDS/eDP) downclocking for power savings "

int i915_lvds_channel_mode __read_mostly;
module_param_named(lvds_channel_mode, i915_lvds_channel_mode, int, 0600);
MODULE_PARM_DESC(lvds_channel_mode,
		"Specify LVDS channel mode "
		"(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");

int i915_panel_use_ssc __read_mostly = -1;
module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
MODULE_PARM_DESC(lvds_use_ssc,
		"Use Spread Spectrum Clock with panels [LVDS/eDP] "
		"(default: auto from VBT)");

int i915_vbt_sdvo_panel_type __read_mostly = -1;
module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
MODULE_PARM_DESC(vbt_sdvo_panel_type,
		"Override/Ignore selection of SDVO panel mode in the VBT "
		"(-2=ignore, -1=auto [default], index in VBT BIOS table)");

static bool i915_try_reset __read_mostly = true;
module_param_named(reset, i915_try_reset, bool, 0600);
MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");

bool i915_enable_hangcheck __read_mostly = true;
module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644);
MODULE_PARM_DESC(enable_hangcheck,
		"Periodically check GPU activity for detecting hangs. "
		"WARNING: Disabling this can cause system wide hangs. "

int i915_enable_ppgtt __read_mostly = -1;
module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600);
MODULE_PARM_DESC(i915_enable_ppgtt,
		"Enable PPGTT (default: true)");

static struct drm_driver driver;
extern int intel_agp_enabled;
#define INTEL_VGA_DEVICE(id, info) {		\
	.class = PCI_BASE_CLASS_DISPLAY << 16,	\
	.class_mask = 0xff0000,			\
	.subvendor = PCI_ANY_ID,		\
	.subdevice = PCI_ANY_ID,		\
	.driver_data = (unsigned long) info }
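
/*
 * Sketch of how these entries are consumed: each INTEL_VGA_DEVICE() line in
 * pciidlist below produces a struct pci_device_id whose driver_data points
 * at the matching intel_device_info, and i915_pci_probe() casts it back to
 * pick the feature flags for the device that bound, e.g.
 * INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info) for GT2 mobile Ivybridge.
 */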
static const struct intel_device_info intel_i830_info = {
	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,

static const struct intel_device_info intel_845g_info = {
	.has_overlay = 1, .overlay_needs_physical = 1,

static const struct intel_device_info intel_i85x_info = {
	.gen = 2, .is_i85x = 1, .is_mobile = 1,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,

static const struct intel_device_info intel_i865g_info = {
	.has_overlay = 1, .overlay_needs_physical = 1,

static const struct intel_device_info intel_i915g_info = {
	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,

static const struct intel_device_info intel_i915gm_info = {
	.gen = 3, .is_mobile = 1,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,

static const struct intel_device_info intel_i945g_info = {
	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,

static const struct intel_device_info intel_i945gm_info = {
	.gen = 3, .is_i945gm = 1, .is_mobile = 1,
	.has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,

static const struct intel_device_info intel_i965g_info = {
	.gen = 4, .is_broadwater = 1,

static const struct intel_device_info intel_i965gm_info = {
	.gen = 4, .is_crestline = 1,
	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,

static const struct intel_device_info intel_g33_info = {
	.gen = 3, .is_g33 = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,

static const struct intel_device_info intel_g45_info = {
	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1,
	.has_pipe_cxsr = 1, .has_hotplug = 1,

static const struct intel_device_info intel_gm45_info = {
	.gen = 4, .is_g4x = 1,
	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
	.has_pipe_cxsr = 1, .has_hotplug = 1,

static const struct intel_device_info intel_pineview_info = {
	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,

static const struct intel_device_info intel_ironlake_d_info = {
	.need_gfx_hws = 1, .has_hotplug = 1,

static const struct intel_device_info intel_ironlake_m_info = {
	.gen = 5, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,

static const struct intel_device_info intel_sandybridge_d_info = {
	.need_gfx_hws = 1, .has_hotplug = 1,

static const struct intel_device_info intel_sandybridge_m_info = {
	.gen = 6, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,

static const struct intel_device_info intel_ivybridge_d_info = {
	.is_ivybridge = 1, .gen = 7,
	.need_gfx_hws = 1, .has_hotplug = 1,

static const struct intel_device_info intel_ivybridge_m_info = {
	.is_ivybridge = 1, .gen = 7, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 0,	/* FBC is not enabled on Ivybridge mobile yet */

static const struct intel_device_info intel_valleyview_m_info = {
	.gen = 7, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,

static const struct intel_device_info intel_valleyview_d_info = {
	.need_gfx_hws = 1, .has_hotplug = 1,

static const struct intel_device_info intel_haswell_d_info = {
	.is_haswell = 1, .gen = 7,
	.need_gfx_hws = 1, .has_hotplug = 1,

static const struct intel_device_info intel_haswell_m_info = {
	.is_haswell = 1, .gen = 7, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
static const struct pci_device_id pciidlist[] = {	/* aka */
	INTEL_VGA_DEVICE(0x3577, &intel_i830_info),	/* I830_M */
	INTEL_VGA_DEVICE(0x2562, &intel_845g_info),	/* 845_G */
	INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),	/* I855_GM */
	INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
	INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),	/* I865_G */
	INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),	/* I915_G */
	INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),	/* E7221_G */
	INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),	/* I915_GM */
	INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),	/* I945_G */
	INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),	/* I945_GM */
	INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),	/* I945_GME */
	INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),	/* I946_GZ */
	INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),	/* G35_G */
	INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),	/* I965_Q */
	INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),	/* I965_G */
	INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),	/* Q35_G */
	INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),	/* G33_G */
	INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),	/* Q33_G */
	INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),	/* I965_GM */
	INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),	/* I965_GME */
	INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),	/* GM45_G */
	INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),	/* IGD_E_G */
	INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),	/* Q45_G */
	INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),	/* G45_G */
	INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),	/* G41_G */
	INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),	/* B43_G */
	INTEL_VGA_DEVICE(0x2e92, &intel_g45_info),	/* B43_G.1 */
	INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
	INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
	INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
	INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
	INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
	INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
	INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
	INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */
	INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */
	INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
	INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
	INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
	INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
	INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
	INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
	INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT2 desktop */
	INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
	INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
	INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT2 server */
	INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
	INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
	INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT2 mobile */
	INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */
	INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */
	INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT2 desktop */
	INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */
	INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */
	INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT2 server */
	INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */
	INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */
	INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT2 mobile */
	INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */
	INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */
	INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT2 desktop */
	INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */
	INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */
	INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT2 server */
	INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
	INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
	INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT2 mobile */
	INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT1 desktop */
	INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT2 desktop */
	INTEL_VGA_DEVICE(0x0D32, &intel_haswell_d_info), /* CRW GT2 desktop */
	INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT1 server */
	INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT2 server */
	INTEL_VGA_DEVICE(0x0D3A, &intel_haswell_d_info), /* CRW GT2 server */
	INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT1 mobile */
	INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */
	INTEL_VGA_DEVICE(0x0D36, &intel_haswell_m_info), /* CRW GT2 mobile */
	INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
	INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
	INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
#if defined(CONFIG_DRM_I915_KMS)
MODULE_DEVICE_TABLE(pci, pciidlist);
#endif
#define INTEL_PCH_DEVICE_ID_MASK	0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE	0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE	0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE	0x1e00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE	0x8c00
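
/*
 * Example of how these values are used: intel_detect_pch() below masks the
 * ISA bridge's PCI device ID with INTEL_PCH_DEVICE_ID_MASK (the top byte),
 * so any bridge ID in the 0x1exx range matches INTEL_PCH_PPT_DEVICE_ID_TYPE
 * and is handled as a CougarPoint-compatible PantherPoint PCH.
 */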
void intel_detect_pch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * The reason to probe ISA bridge instead of Dev31:Fun0 is to
	 * make graphics device passthrough work easy for VMM, that only
	 * need to expose ISA bridge to let driver know the real hardware
	 * underneath. This is a requirement from virtualization team.
	 */
	pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (pch->vendor == PCI_VENDOR_ID_INTEL) {
		id = pch->device & INTEL_PCH_DEVICE_ID_MASK;

		if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
			dev_priv->pch_type = PCH_IBX;
			dev_priv->num_pch_pll = 2;
			DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
		} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
			dev_priv->pch_type = PCH_CPT;
			dev_priv->num_pch_pll = 2;
			DRM_DEBUG_KMS("Found CougarPoint PCH\n");
		} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
			/* PantherPoint is CPT compatible */
			dev_priv->pch_type = PCH_CPT;
			dev_priv->num_pch_pll = 2;
			DRM_DEBUG_KMS("Found PantherPoint PCH\n");
		} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
			dev_priv->pch_type = PCH_LPT;
			dev_priv->num_pch_pll = 0;
			DRM_DEBUG_KMS("Found LynxPoint PCH\n");

		BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS);
bool i915_semaphore_is_enabled(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen < 6)

	if (i915_semaphores >= 0)
		return i915_semaphores;

#ifdef CONFIG_INTEL_IOMMU
	/* Enable semaphores on SNB when IO remapping is off */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
static int i915_drm_freeze(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	drm_kms_helper_poll_disable(dev);

	pci_save_state(dev->pdev);

	/* If KMS is active, we do the leavevt stuff here */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		int error = i915_gem_idle(dev);
			dev_err(&dev->pdev->dev,
				"GEM idle failed, resume might fail\n");

		drm_irq_uninstall(dev);

	i915_save_state(dev);

	intel_opregion_fini(dev);

	/* Modeset on resume, not lid events */
	dev_priv->modeset_on_lid = 0;

	intel_fbdev_set_suspend(dev, 1);
int i915_suspend(struct drm_device *dev, pm_message_t state)
{
	if (!dev || !dev->dev_private) {
		DRM_ERROR("dev: %p\n", dev);
		DRM_ERROR("DRM not initialized, aborting suspend.\n");

	if (state.event == PM_EVENT_PRETHAW)

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)

	error = i915_drm_freeze(dev);

	if (state.event == PM_EVENT_SUSPEND) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
static int i915_drm_thaw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		mutex_lock(&dev->struct_mutex);
		i915_gem_restore_gtt_mappings(dev);
		mutex_unlock(&dev->struct_mutex);

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
			ironlake_init_pch_refclk(dev);

		mutex_lock(&dev->struct_mutex);
		dev_priv->mm.suspended = 0;

		error = i915_gem_init_hw(dev);
		mutex_unlock(&dev->struct_mutex);

		intel_modeset_init_hw(dev);
		drm_mode_config_reset(dev);
		drm_irq_install(dev);

		/* Resume the modeset for every activated CRTC */
		mutex_lock(&dev->mode_config.mutex);
		drm_helper_resume_force_mode(dev);
		mutex_unlock(&dev->mode_config.mutex);

	intel_opregion_init(dev);

	dev_priv->modeset_on_lid = 0;

	intel_fbdev_set_suspend(dev, 0);
int i915_resume(struct drm_device *dev)
{
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)

	if (pci_enable_device(dev->pdev))

	pci_set_master(dev->pdev);

	ret = i915_drm_thaw(dev);

	drm_kms_helper_poll_enable(dev);
static int i8xx_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
	POSTING_READ(D_STATE);

	if (IS_I830(dev) || IS_845G(dev)) {
		I915_WRITE(DEBUG_RESET_I830,
			   DEBUG_RESET_DISPLAY |

		POSTING_READ(DEBUG_RESET_I830);

		I915_WRITE(DEBUG_RESET_I830, 0);
		POSTING_READ(DEBUG_RESET_I830);

	I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
	POSTING_READ(D_STATE);
static int i965_reset_complete(struct drm_device *dev)
{
	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;

static int i965_do_reset(struct drm_device *dev)
{
	/*
	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
	 * well as the reset bit (GR/bit 0). Setting the GR bit
	 * triggers the reset; when done, the hardware will clear it.
	 */
	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
	pci_write_config_byte(dev->pdev, I965_GDRST,
			      gdrst | GRDOM_RENDER |

	ret = wait_for(i965_reset_complete(dev), 500);

	/* We can't reset render&media without also resetting display ... */
	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
	pci_write_config_byte(dev->pdev, I965_GDRST,
			      gdrst | GRDOM_MEDIA |

	return wait_for(i965_reset_complete(dev), 500);
static int ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);

	/* We can't reset render&media without also resetting display ... */
	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
static int gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	/* Hold gt_lock across reset to prevent any register access
	 * with forcewake not set correctly
	 */
	spin_lock_irqsave(&dev_priv->gt_lock, irqflags);

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 */
	I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = wait_for((I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

	/* If reset with a user forcewake, try to restore, otherwise turn it off */
	if (dev_priv->forcewake_count)
		dev_priv->gt.force_wake_get(dev_priv);
	else
		dev_priv->gt.force_wake_put(dev_priv);

	/* Restore fifo count */
	dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);

	spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
int intel_gpu_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	switch (INTEL_INFO(dev)->gen) {
		ret = gen6_do_reset(dev);
		ret = ironlake_do_reset(dev);
		ret = i965_do_reset(dev);
		ret = i8xx_do_reset(dev);

	/* Also reset the gpu hangman. */
	if (dev_priv->stop_rings) {
		DRM_DEBUG("Simulated gpu hang, resetting stop_rings\n");
		dev_priv->stop_rings = 0;
		if (ret == -ENODEV) {
			DRM_ERROR("Reset not implemented, but ignoring "
				  "error for simulated gpu hangs\n");
/**
 * i915_reset - reset chip after a hang
 * @dev: drm device to reset
 *
 * Reset the chip. Useful if a hang is detected. Returns zero on successful
 * reset or otherwise an error code.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 */
int i915_reset(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);

	if (get_seconds() - dev_priv->last_gpu_reset < 5)
		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");

	ret = intel_gpu_reset(dev);

	dev_priv->last_gpu_reset = get_seconds();
		DRM_ERROR("Failed to reset chip.\n");
		mutex_unlock(&dev->struct_mutex);

	/* Ok, now get things going again... */

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there. Fortunately we don't need to do this unless we reset the
	 * chip at a PCI level.
	 *
	 * Next we need to restore the context, but we don't use those
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
			!dev_priv->mm.suspended) {
		struct intel_ring_buffer *ring;

		dev_priv->mm.suspended = 0;

		i915_gem_init_swizzling(dev);

		for_each_ring(ring, dev_priv, i)

		i915_gem_context_init(dev);
		i915_gem_init_ppgtt(dev);

		/*
		 * It would make sense to re-init all the other hw state, at
		 * least the rps/rc6/emon init done within modeset_init_hw. For
		 * some unknown reason, this blows up my ilk, so don't.
		 */

		mutex_unlock(&dev->struct_mutex);

		drm_irq_uninstall(dev);
		drm_irq_install(dev);

	mutex_unlock(&dev->struct_mutex);
i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct intel_device_info *intel_info =
		(struct intel_device_info *) ent->driver_data;

	/* Only bind to function 0 of the device. Early generations
	 * used function 1 as a placeholder for multi-head. This causes
	 * us confusion instead, especially on the systems where both
	 * functions have the same PCI-ID!
	 */
	if (PCI_FUNC(pdev->devfn))

	/* We've managed to ship a kms-enabled ddx that shipped with an XvMC
	 * implementation for gen3 (and only gen3) that used legacy drm maps
	 * (gasp!) to share buffers between X and the client. Hence we need to
	 * keep around the fake agp stuff for gen3, even when kms is enabled. */
	if (intel_info->gen != 3) {
		driver.driver_features &=
			~(DRIVER_USE_AGP | DRIVER_REQUIRE_AGP);
	} else if (!intel_agp_enabled) {
		DRM_ERROR("drm/i915 can't work without intel_agp module!\n");

	return drm_get_pci_dev(pdev, ent, &driver);
i915_pci_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

static int i915_pm_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	if (!drm_dev || !drm_dev->dev_private) {
		dev_err(dev, "DRM not initialized, aborting suspend.\n");

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)

	error = i915_drm_freeze(drm_dev);

	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

static int i915_pm_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_resume(drm_dev);

static int i915_pm_freeze(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	if (!drm_dev || !drm_dev->dev_private) {
		dev_err(dev, "DRM not initialized, aborting suspend.\n");

	return i915_drm_freeze(drm_dev);

static int i915_pm_thaw(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_drm_thaw(drm_dev);

static int i915_pm_poweroff(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_drm_freeze(drm_dev);
static const struct dev_pm_ops i915_pm_ops = {
	.suspend = i915_pm_suspend,
	.resume = i915_pm_resume,
	.freeze = i915_pm_freeze,
	.thaw = i915_pm_thaw,
	.poweroff = i915_pm_poweroff,
	.restore = i915_pm_resume,

static const struct vm_operations_struct i915_gem_vm_ops = {
	.fault = i915_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,

static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_gem_mmap,
	.fasync = drm_fasync,
	.compat_ioctl = i915_compat_ioctl,
	.llseek = noop_llseek,
static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME,
	.load = i915_driver_load,
	.unload = i915_driver_unload,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.preclose = i915_driver_preclose,
	.postclose = i915_driver_postclose,

	/* Used in place of i915_pm_ops for non-DRIVER_MODESET */
	.suspend = i915_suspend,
	.resume = i915_resume,

	.device_is_agp = i915_driver_device_is_agp,
	.master_create = i915_master_create,
	.master_destroy = i915_master_destroy,
#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = i915_debugfs_init,
	.debugfs_cleanup = i915_debugfs_cleanup,
#endif
	.gem_init_object = i915_gem_init_object,
	.gem_free_object = i915_gem_free_object,
	.gem_vm_ops = &i915_gem_vm_ops,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = i915_gem_prime_export,
	.gem_prime_import = i915_gem_prime_import,

	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_mmap_gtt,
	.dumb_destroy = i915_gem_dumb_destroy,
	.ioctls = i915_ioctls,
	.fops = &i915_driver_fops,

	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
static struct pci_driver i915_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = i915_pci_probe,
	.remove = i915_pci_remove,
	.driver.pm = &i915_pm_ops,
static int __init i915_init(void)
{
	driver.num_ioctls = i915_max_ioctl;

	/*
	 * If CONFIG_DRM_I915_KMS is set, default to KMS unless
	 * explicitly disabled with the module parameter.
	 *
	 * Otherwise, just follow the parameter (defaulting to off).
	 *
	 * Allow optional vga_text_mode_force boot option to override
	 * the default behavior.
	 */
#if defined(CONFIG_DRM_I915_KMS)
	if (i915_modeset != 0)
		driver.driver_features |= DRIVER_MODESET;
#endif
	if (i915_modeset == 1)
		driver.driver_features |= DRIVER_MODESET;

#ifdef CONFIG_VGA_CONSOLE
	if (vgacon_text_force() && i915_modeset == -1)
		driver.driver_features &= ~DRIVER_MODESET;
#endif

	if (!(driver.driver_features & DRIVER_MODESET))
		driver.get_vblank_timestamp = NULL;

	return drm_pci_init(&driver, &i915_pci_driver);

static void __exit i915_exit(void)
{
	drm_pci_exit(&driver, &i915_pci_driver);

module_init(i915_init);
module_exit(i915_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
	((HAS_FORCE_WAKE((dev_priv)->dev)) && \
	 ((reg) < 0x40000) &&            \
	 ((reg) != FORCEWAKE))
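
/*
 * NEEDS_FORCE_WAKE() is what the register accessor macros further down use
 * to decide whether an MMIO access must be bracketed by a forcewake
 * get/put: only registers below 0x40000 on forcewake-capable hardware need
 * it, and the FORCEWAKE register itself is excluded.
 */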
static bool IS_DISPLAYREG(u32 reg)
{
	/*
	 * This should make it easier to transition modules over to the
	 * new register block scheme, since we can do it incrementally.
	 */
	if (reg >= 0x180000)

	if (reg >= RENDER_RING_BASE &&
	    reg < RENDER_RING_BASE + 0xff)

	if (reg >= GEN6_BSD_RING_BASE &&
	    reg < GEN6_BSD_RING_BASE + 0xff)

	if (reg >= BLT_RING_BASE &&
	    reg < BLT_RING_BASE + 0xff)

	if (reg == PGTBL_ER)

	if (reg >= IPEIR_I965 &&

	if (reg == GFX_MODE_GEN7)

	if (reg == RENDER_HWS_PGA_GEN7 ||
	    reg == BSD_HWS_PGA_GEN7 ||
	    reg == BLT_HWS_PGA_GEN7)

	if (reg == GEN6_BSD_SLEEP_PSMI_CONTROL ||
	    reg == GEN6_BSD_RNCID)

	if (reg == GEN6_BLITTER_ECOSKPD)

	if (reg >= 0x4000c &&

	if (reg >= 0x4f000 &&

	if (reg >= 0x4f100 &&

	if (reg >= VLV_MASTER_IER &&

	if (reg >= FENCE_REG_SANDYBRIDGE_0 &&
	    reg < (FENCE_REG_SANDYBRIDGE_0 + (16*8)))

	if (reg >= VLV_IIR_RW &&

	if (reg == FORCEWAKE_VLV ||
	    reg == FORCEWAKE_ACK_VLV)

	if (reg == GEN6_GDRST)
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		unsigned long irqflags; \
		spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
		if (dev_priv->forcewake_count == 0) \
			dev_priv->gt.force_wake_get(dev_priv); \
		val = read##y(dev_priv->regs + reg); \
		if (dev_priv->forcewake_count == 0) \
			dev_priv->gt.force_wake_put(dev_priv); \
		spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
	} else if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
		val = read##y(dev_priv->regs + reg + 0x180000); \
	} else { \
		val = read##y(dev_priv->regs + reg); \
	trace_i915_reg_rw(false, reg, val, sizeof(val)); \
#define __i915_write(x, y) \
void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
	u32 __fifo_ret = 0; \
	trace_i915_reg_rw(true, reg, val, sizeof(val)); \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
		write##y(val, dev_priv->regs + reg + 0x180000); \
	} else { \
		write##y(val, dev_priv->regs + reg); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
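
/*
 * A hedged usage sketch (assumed; the instantiations are not part of this
 * excerpt): __i915_read()/__i915_write() are expected to be expanded once
 * per access width, e.g. __i915_read(32, l) to emit i915_read32() backed by
 * readl(), so the I915_READ()/I915_WRITE() wrappers pick up the forcewake
 * and Valleyview display-offset handling above without callers touching
 * dev_priv->regs directly.
 */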