/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/device.h>
#include <linux/acpi.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

#include <linux/apple-gmux.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <drm/drm_crtc_helper.h>

static struct drm_driver driver;

#define GEN_DEFAULT_PIPEOFFSETS \
	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
			  PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
			   TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }

#define GEN_CHV_PIPEOFFSETS \
	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
			  CHV_PIPE_C_OFFSET }, \
	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
			   CHV_TRANSCODER_C_OFFSET }, \
	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
			     CHV_PALETTE_C_OFFSET }

#define CURSOR_OFFSETS \
	.cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }

#define IVB_CURSOR_OFFSETS \
	.cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
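
/*
 * Explanatory note: the *_OFFSETS macros above are plain
 * designated-initializer fragments, so writing "GEN_DEFAULT_PIPEOFFSETS,"
 * inside one of the intel_device_info initializers below expands roughly to
 *
 *	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET,
 *			  PIPE_C_OFFSET, PIPE_EDP_OFFSET },
 *	.trans_offsets = { ... },
 *	.palette_offsets = { ... },
 *
 * which keeps the per-platform device info tables short.
 */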

static const struct intel_device_info intel_i830_info = {
	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_845g_info = {
	.gen = 2, .num_pipes = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i85x_info = {
	.gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i865g_info = {
	.gen = 2, .num_pipes = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i915g_info = {
	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i915gm_info = {
	.gen = 3, .is_mobile = 1, .num_pipes = 2,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i945g_info = {
	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i945gm_info = {
	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
	.has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i965g_info = {
	.gen = 4, .is_broadwater = 1, .num_pipes = 2,
	.has_hotplug = 1,
	.has_overlay = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i965gm_info = {
	.gen = 4, .is_crestline = 1, .num_pipes = 2,
	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.supports_tv = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_g33_info = {
	.gen = 3, .is_g33 = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_g45_info = {
	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_gm45_info = {
	.gen = 4, .is_g4x = 1, .num_pipes = 2,
	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.supports_tv = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_pineview_info = {
	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ironlake_d_info = {
	.gen = 5, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ironlake_m_info = {
	.gen = 5, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_sandybridge_d_info = {
	.gen = 6, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	.has_llc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_sandybridge_m_info = {
	.gen = 6, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	.has_llc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

#define GEN7_FEATURES \
	.gen = 7, .num_pipes = 3, \
	.need_gfx_hws = 1, .has_hotplug = 1, \
	.has_fbc = 1, \
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
	.has_llc = 1, \
	GEN_DEFAULT_PIPEOFFSETS, \
	IVB_CURSOR_OFFSETS

static const struct intel_device_info intel_ivybridge_d_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
};

static const struct intel_device_info intel_ivybridge_m_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.is_mobile = 1,
};

static const struct intel_device_info intel_ivybridge_q_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.num_pipes = 0, /* legal, last one wins */
};
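
/*
 * The .num_pipes = 0 override above relies on C99 designated-initializer
 * semantics: when a field is initialized more than once, the last
 * initializer wins. A minimal sketch of the same pattern (BASE_FEATURES
 * and example_info are illustrative names, not part of the driver):
 *
 *	#define BASE_FEATURES .gen = 7, .num_pipes = 3
 *	static const struct intel_device_info example_info = {
 *		BASE_FEATURES,
 *		.num_pipes = 0,	// overrides the value from BASE_FEATURES
 *	};
 */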

#define VLV_FEATURES \
	.gen = 7, .num_pipes = 2, \
	.need_gfx_hws = 1, .has_hotplug = 1, \
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
	.display_mmio_offset = VLV_DISPLAY_BASE, \
	GEN_DEFAULT_PIPEOFFSETS, \
	CURSOR_OFFSETS

static const struct intel_device_info intel_valleyview_m_info = {
	VLV_FEATURES,
	.is_valleyview = 1,
	.is_mobile = 1,
};

static const struct intel_device_info intel_valleyview_d_info = {
	VLV_FEATURES,
	.is_valleyview = 1,
};

#define HSW_FEATURES \
	GEN7_FEATURES, \
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
	.has_ddi = 1, \
	.has_fpga_dbg = 1

static const struct intel_device_info intel_haswell_d_info = {
	HSW_FEATURES,
	.is_haswell = 1,
};

static const struct intel_device_info intel_haswell_m_info = {
	HSW_FEATURES,
	.is_haswell = 1,
	.is_mobile = 1,
};

static const struct intel_device_info intel_broadwell_d_info = {
	HSW_FEATURES,
	.gen = 8,
};

static const struct intel_device_info intel_broadwell_m_info = {
	HSW_FEATURES,
	.gen = 8, .is_mobile = 1,
};

static const struct intel_device_info intel_broadwell_gt3d_info = {
	HSW_FEATURES,
	.gen = 8,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};

static const struct intel_device_info intel_broadwell_gt3m_info = {
	HSW_FEATURES,
	.gen = 8, .is_mobile = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};

static const struct intel_device_info intel_cherryview_info = {
	.gen = 8, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.is_cherryview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	GEN_CHV_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_skylake_info = {
	HSW_FEATURES,
	.is_skylake = 1,
	.gen = 9,
};

static const struct intel_device_info intel_skylake_gt3_info = {
	HSW_FEATURES,
	.is_skylake = 1,
	.gen = 9,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};

static const struct intel_device_info intel_broxton_info = {
	.is_preliminary = 1,
	.is_broxton = 1,
	.gen = 9,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.num_pipes = 3,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_kabylake_info = {
	HSW_FEATURES,
	.is_preliminary = 1,
	.is_kabylake = 1,
	.gen = 9,
};

static const struct intel_device_info intel_kabylake_gt3_info = {
	HSW_FEATURES,
	.is_preliminary = 1,
	.is_kabylake = 1,
	.gen = 9,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};

/*
 * Make sure any device matches here are from most specific to most
 * general. For example, since the Quanta match is based on the subsystem
 * and subvendor IDs, we need it to come before the more general IVB
 * PCI ID matches, otherwise we'll use the wrong info struct above.
 */
static const struct pci_device_id pciidlist[] = {
	INTEL_I830_IDS(&intel_i830_info),
	INTEL_I845G_IDS(&intel_845g_info),
	INTEL_I85X_IDS(&intel_i85x_info),
	INTEL_I865G_IDS(&intel_i865g_info),
	INTEL_I915G_IDS(&intel_i915g_info),
	INTEL_I915GM_IDS(&intel_i915gm_info),
	INTEL_I945G_IDS(&intel_i945g_info),
	INTEL_I945GM_IDS(&intel_i945gm_info),
	INTEL_I965G_IDS(&intel_i965g_info),
	INTEL_G33_IDS(&intel_g33_info),
	INTEL_I965GM_IDS(&intel_i965gm_info),
	INTEL_GM45_IDS(&intel_gm45_info),
	INTEL_G45_IDS(&intel_g45_info),
	INTEL_PINEVIEW_IDS(&intel_pineview_info),
	INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),
	INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),
	INTEL_SNB_D_IDS(&intel_sandybridge_d_info),
	INTEL_SNB_M_IDS(&intel_sandybridge_m_info),
	INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */
	INTEL_IVB_M_IDS(&intel_ivybridge_m_info),
	INTEL_IVB_D_IDS(&intel_ivybridge_d_info),
	INTEL_HSW_D_IDS(&intel_haswell_d_info),
	INTEL_HSW_M_IDS(&intel_haswell_m_info),
	INTEL_VLV_M_IDS(&intel_valleyview_m_info),
	INTEL_VLV_D_IDS(&intel_valleyview_d_info),
	INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),
	INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),
	INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),
	INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info),
	INTEL_CHV_IDS(&intel_cherryview_info),
	INTEL_SKL_GT1_IDS(&intel_skylake_info),
	INTEL_SKL_GT2_IDS(&intel_skylake_info),
	INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),
	INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info),
	INTEL_BXT_IDS(&intel_broxton_info),
	INTEL_KBL_GT1_IDS(&intel_kabylake_info),
	INTEL_KBL_GT2_IDS(&intel_kabylake_info),
	INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
	INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
	{0, 0, 0}
};

MODULE_DEVICE_TABLE(pci, pciidlist);
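
/*
 * Sketch of what the table above contains: each INTEL_*_IDS() macro
 * expands to one or more pci_device_id entries of roughly the form
 *
 *	{ 0x8086, device-id, PCI_ANY_ID, PCI_ANY_ID, ...,
 *	  (kernel_ulong_t)&intel_..._info },
 *
 * (see i915_pciids.h for the exact expansion). MODULE_DEVICE_TABLE(pci, ...)
 * exports the table in the module alias section so userspace
 * (udev/modprobe) can autoload i915 when a matching PCI device appears.
 */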

static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
{
	enum intel_pch ret = PCH_NOP;

	/*
	 * In a virtualized passthrough environment we can be in a
	 * setup where the ISA bridge is not able to be passed through.
	 * In this case, a south bridge can be emulated and we have to
	 * make an educated guess as to which PCH is really there.
	 */

	if (IS_GEN5(dev)) {
		ret = PCH_IBX;
		DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
	} else if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
		ret = PCH_CPT;
		DRM_DEBUG_KMS("Assuming CougarPoint PCH\n");
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		ret = PCH_LPT;
		DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		ret = PCH_SPT;
		DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
	}

	return ret;
}

void intel_detect_pch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct pci_dev *pch = NULL;

	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
	 * (which really amounts to a PCH but no South Display).
	 */
	if (INTEL_INFO(dev)->num_pipes == 0) {
		dev_priv->pch_type = PCH_NOP;
		return;
	}

	/*
	 * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
	 * make graphics device passthrough work easy for VMMs, which only
	 * need to expose the ISA bridge to let the driver know the real
	 * hardware underneath. This is a requirement from the
	 * virtualization team.
	 *
	 * In some virtualized environments (e.g. XEN), there may be an
	 * irrelevant ISA bridge in the system. To work reliably, we should
	 * scan through all the ISA bridge devices and check for the first
	 * match, instead of only checking the first one.
	 */
	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
			unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
			dev_priv->pch_id = id;

			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_IBX;
				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
				WARN_ON(!IS_GEN5(dev));
			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
				/* PantherPoint is CPT compatible */
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
				WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
				WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
				WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
				WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
			} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_SPT;
				DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
				WARN_ON(!IS_SKYLAKE(dev) &&
					!IS_KABYLAKE(dev));
			} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_SPT;
				DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
				WARN_ON(!IS_SKYLAKE(dev) &&
					!IS_KABYLAKE(dev));
			} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
				   ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
				    pch->subsystem_vendor == 0x1af4 &&
				    pch->subsystem_device == 0x1100)) {
				dev_priv->pch_type = intel_virt_detect_pch(dev);
			} else
				continue;

			break;
		}
	}
	if (!pch)
		DRM_DEBUG_KMS("No PCH found.\n");

	pci_dev_put(pch);
}

bool i915_semaphore_is_enabled(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen < 6)
		return false;

	if (i915.semaphores >= 0)
		return i915.semaphores;

	/* TODO: make semaphores and Execlists play nicely together */
	if (i915.enable_execlists)
		return false;

	/* Until we get further testing... */
	if (IS_GEN8(dev))
		return false;

#ifdef CONFIG_INTEL_IOMMU
	/* Enable semaphores on SNB when IO remapping is off */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif

	return true;
}
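
/*
 * i915.semaphores is a tri-state module parameter: -1 selects the
 * per-platform default implemented above, while 0/1 force-disable or
 * force-enable. E.g. booting with i915.semaphores=1 makes this function
 * return true on Gen6+ configurations it would otherwise reject (such
 * as with execlists enabled).
 */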

static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	drm_modeset_lock_all(dev);
	for_each_intel_encoder(dev, encoder)
		if (encoder->suspend)
			encoder->suspend(encoder);
	drm_modeset_unlock_all(dev);
}

static int intel_suspend_complete(struct drm_i915_private *dev_priv);
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
			      bool rpm_resume);
static int bxt_resume_prepare(struct drm_i915_private *dev_priv);

static bool suspend_to_idle(struct drm_i915_private *dev_priv)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
	if (acpi_target_system_state() < ACPI_STATE_S3)
		return true;
#endif

	return false;
}
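
/*
 * suspend_to_idle() distinguishes s2idle ("freeze", an ACPI target state
 * shallower than S3) from an ordinary suspend; i915_drm_suspend() below
 * uses it to pick PCI_D1 instead of PCI_D3cold as the opregion target
 * state, and i915_drm_suspend_late() uses it to decide whether the
 * CSR/DMC firmware can keep the power domains alive.
 */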

static int i915_drm_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	pci_power_t opregion_target_state;
	int error;

	/* ignore lid events during suspend */
	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_SUSPENDED;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	disable_rpm_wakeref_asserts(dev_priv);

	/* We do a lot of poking in a lot of registers, make sure they work
	 * properly. */
	intel_display_set_init_power(dev_priv, true);

	drm_kms_helper_poll_disable(dev);

	pci_save_state(dev->pdev);

	error = i915_gem_suspend(dev);
	if (error) {
		dev_err(&dev->pdev->dev,
			"GEM idle failed, resume might fail\n");
		goto out;
	}

	intel_guc_suspend(dev);

	intel_suspend_gt_powersave(dev);

	intel_display_suspend(dev);

	intel_dp_mst_suspend(dev);

	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_hpd_cancel_work(dev_priv);

	intel_suspend_encoders(dev_priv);

	intel_suspend_hw(dev);

	i915_gem_suspend_gtt_mappings(dev);

	i915_save_state(dev);

	opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
	intel_opregion_notify_adapter(dev, opregion_target_state);

	intel_uncore_forcewake_reset(dev, false);
	intel_opregion_fini(dev);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);

	dev_priv->suspend_count++;

	intel_display_set_init_power(dev_priv, false);

	if (HAS_CSR(dev_priv))
		flush_work(&dev_priv->csr.work);

out:
	enable_rpm_wakeref_asserts(dev_priv);

	return error;
}

static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
{
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	bool fw_csr;
	int ret;

	disable_rpm_wakeref_asserts(dev_priv);

	fw_csr = suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
	/*
	 * In case of firmware assisted context save/restore don't manually
	 * deinit the power domains. This also means the CSR/DMC firmware will
	 * stay active, it will power down any HW resources as required and
	 * also enable deeper system power states that would be blocked if the
	 * firmware was inactive.
	 */
	if (!fw_csr)
		intel_power_domains_suspend(dev_priv);

	ret = intel_suspend_complete(dev_priv);

	if (ret) {
		DRM_ERROR("Suspend complete failed: %d\n", ret);
		if (!fw_csr)
			intel_power_domains_init_hw(dev_priv, true);

		goto out;
	}

	pci_disable_device(drm_dev->pdev);
	/*
	 * During hibernation on some platforms the BIOS may try to access
	 * the device even though it's already in D3 and hang the machine. So
	 * leave the device in D0 on those platforms and hope the BIOS will
	 * power down the device properly. The issue was seen on multiple old
	 * GENs with different BIOS vendors, so having an explicit blacklist
	 * is impractical; apply the workaround on everything pre GEN6. The
	 * platforms where the issue was seen:
	 * Lenovo Thinkpad X301, X61s, X60, T60, X41
	 */
	if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
		pci_set_power_state(drm_dev->pdev, PCI_D3hot);

	dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);

out:
	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
{
	int error;

	if (!dev || !dev->dev_private) {
		DRM_ERROR("dev: %p\n", dev);
		DRM_ERROR("DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
			 state.event != PM_EVENT_FREEZE))
		return -EINVAL;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_suspend(dev);
	if (error)
		return error;

	return i915_drm_suspend_late(dev, false);
}

static int i915_drm_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	disable_rpm_wakeref_asserts(dev_priv);

	mutex_lock(&dev->struct_mutex);
	i915_gem_restore_gtt_mappings(dev);
	mutex_unlock(&dev->struct_mutex);

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	intel_init_pch_refclk(dev);
	drm_mode_config_reset(dev);

	/*
	 * Interrupts have to be enabled before any batches are run. If not the
	 * GPU will hang. i915_gem_init_hw() will initiate batches to
	 * update/restore the context.
	 *
	 * Modeset enabling in intel_modeset_init_hw() also needs working
	 * interrupts.
	 */
	intel_runtime_pm_enable_interrupts(dev_priv);

	mutex_lock(&dev->struct_mutex);
	if (i915_gem_init_hw(dev)) {
		DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
		atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
	}
	mutex_unlock(&dev->struct_mutex);

	intel_guc_resume(dev);

	intel_modeset_init_hw(dev);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_dp_mst_resume(dev);

	intel_display_resume(dev);

	/*
	 * ... but also need to make sure that hotplug processing
	 * doesn't cause havoc. Like in the driver load code we don't
	 * bother with the tiny race here where we might lose hotplug
	 * notifications.
	 */
	intel_hpd_init(dev_priv);
	/* Config may have changed between suspend and resume */
	drm_helper_hpd_irq_event(dev);

	intel_opregion_init(dev);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);

	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_DONE;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	intel_opregion_notify_adapter(dev, PCI_D0);

	drm_kms_helper_poll_enable(dev);

	enable_rpm_wakeref_asserts(dev_priv);

	return 0;
}

static int i915_drm_resume_early(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	/*
	 * We have a resume ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with an early
	 * resume hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (pci_enable_device(dev->pdev)) {
		ret = -EIO;
		goto out;
	}

	pci_set_master(dev->pdev);

	disable_rpm_wakeref_asserts(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_resume_prepare(dev_priv, false);
	if (ret)
		DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
			  ret);

	intel_uncore_early_sanitize(dev, true);

	if (IS_BROXTON(dev))
		ret = bxt_resume_prepare(dev_priv);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_disable_pc8(dev_priv);

	intel_uncore_sanitize(dev);

	if (!(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
		intel_power_domains_init_hw(dev_priv, true);

out:
	dev_priv->suspended_to_idle = false;

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

int i915_resume_switcheroo(struct drm_device *dev)
{
	int ret;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	ret = i915_drm_resume_early(dev);
	if (ret)
		return ret;

	return i915_drm_resume(dev);
}

/**
 * i915_reset - reset chip after a hang
 * @dev: drm device to reset
 *
 * Reset the chip. Useful if a hang is detected. Returns zero on successful
 * reset or otherwise an error code.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 */
int i915_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool simulated;
	int ret;

	intel_reset_gt_powersave(dev);

	mutex_lock(&dev->struct_mutex);

	simulated = dev_priv->gpu_error.stop_rings != 0;

	ret = intel_gpu_reset(dev);

	/* Also reset the gpu hangman. */
	if (simulated) {
		DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
		dev_priv->gpu_error.stop_rings = 0;
		if (ret == -ENODEV) {
			DRM_INFO("Reset not implemented, but ignoring "
				 "error for simulated gpu hangs\n");
			ret = 0;
		}
	}

	if (i915_stop_ring_allow_warn(dev_priv))
		pr_notice("drm/i915: Resetting chip after gpu hang\n");

	if (ret) {
		DRM_ERROR("Failed to reset chip: %i\n", ret);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	intel_overlay_reset(dev_priv);

	/* Ok, now get things going again... */

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there. Fortunately we don't need to do this unless we reset the
	 * chip at a PCI level.
	 *
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */

	/* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
	dev_priv->gpu_error.reload_in_reset = true;

	ret = i915_gem_init_hw(dev);

	dev_priv->gpu_error.reload_in_reset = false;

	mutex_unlock(&dev->struct_mutex);
	if (ret) {
		DRM_ERROR("Failed hw init on reset %d\n", ret);
		return ret;
	}

	/*
	 * rps/rc6 re-init is necessary to restore state lost after the
	 * reset and the re-install of gt irqs. Skip for ironlake per
	 * previous concerns that it doesn't respond well to some forms
	 * of re-init after reset.
	 */
	if (INTEL_INFO(dev)->gen > 5)
		intel_enable_gt_powersave(dev);

	return 0;
}

static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct intel_device_info *intel_info =
		(struct intel_device_info *) ent->driver_data;

	if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
		DRM_INFO("This hardware requires preliminary hardware support.\n"
			 "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
		return -ENODEV;
	}

	/* Only bind to function 0 of the device. Early generations
	 * used function 1 as a placeholder for multi-head. This causes
	 * us confusion instead, especially on the systems where both
	 * functions have the same PCI-ID!
	 */
	if (PCI_FUNC(pdev->devfn))
		return -ENODEV;

	/*
	 * apple-gmux is needed on dual GPU MacBook Pro
	 * to probe the panel if we're the inactive GPU.
	 */
	if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) &&
	    apple_gmux_present() && pdev != vga_default_device() &&
	    !vga_switcheroo_handler_flags())
		return -EPROBE_DEFER;

	return drm_get_pci_dev(pdev, ent, &driver);
}

static void
i915_pci_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);
}

static int i915_pm_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	if (!drm_dev || !drm_dev->dev_private) {
		dev_err(dev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend(drm_dev);
}

static int i915_pm_suspend_late(struct device *dev)
{
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;

	/*
	 * We have a suspend ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with a late
	 * suspend hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(drm_dev, false);
}

static int i915_pm_poweroff_late(struct device *dev)
{
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(drm_dev, true);
}

static int i915_pm_resume_early(struct device *dev)
{
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume_early(drm_dev);
}

static int i915_pm_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume(drm_dev);
}

static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
{
	hsw_enable_pc8(dev_priv);

	return 0;
}

static int bxt_suspend_complete(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	/* TODO: when DC5 support is added disable DC5 here. */

	broxton_ddi_phy_uninit(dev);
	broxton_uninit_cdclk(dev);
	bxt_enable_dc9(dev_priv);

	return 0;
}

static int bxt_resume_prepare(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	/* TODO: when CSR FW support is added make sure the FW is loaded */

	bxt_disable_dc9(dev_priv);

	/*
	 * TODO: when DC5 support is added enable DC5 here if the CSR FW
	 * is available.
	 */
	broxton_init_cdclk(dev);
	broxton_ddi_phy_init(dev);
	intel_prepare_ddi(dev);

	return 0;
}

/*
 * Save all Gunit registers that may be lost after a D3 and a subsequent
 * S0i[R123] transition. The list of registers needing a save/restore is
 * defined in the VLV2_S0IXRegs document. This document marks all Gunit
 * registers in the following way:
 * - Driver: saved/restored by the driver
 * - Punit : saved/restored by the Punit firmware
 * - No, w/o marking: no need to save/restore, since the register is R/O or
 *                    used internally by the HW in a way that doesn't depend
 *                    on keeping the content across a suspend/resume.
 * - Debug : used for debugging
 *
 * We save/restore all registers marked with 'Driver', with the following
 * exceptions:
 * - Registers out of use, including also registers marked with 'Debug'.
 *   These have no effect on the driver's operation, so we don't save/restore
 *   them to reduce the overhead.
 * - Registers that are fully setup by an initialization function called from
 *   the resume path. For example many clock gating and RPS/RC6 registers.
 * - Registers that provide the right functionality with their reset defaults.
 *
 * TODO: Except for registers that based on the above 3 criteria can be safely
 * ignored, we save/restore all others, practically treating the HW context as
 * a black-box for the driver. Further investigation is needed to reduce the
 * saved/restored registers even further, by following the same 3 criteria.
 */
static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
	int i;

	/* GAM 0x4000-0x4770 */
	s->wr_watermark		= I915_READ(GEN7_WR_WATERMARK);
	s->gfx_prio_ctrl	= I915_READ(GEN7_GFX_PRIO_CTRL);
	s->arb_mode		= I915_READ(ARB_MODE);
	s->gfx_pend_tlb0	= I915_READ(GEN7_GFX_PEND_TLB0);
	s->gfx_pend_tlb1	= I915_READ(GEN7_GFX_PEND_TLB1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));

	s->media_max_req_count	= I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
	s->gfx_max_req_count	= I915_READ(GEN7_GFX_MAX_REQ_COUNT);

	s->render_hwsp		= I915_READ(RENDER_HWS_PGA_GEN7);
	s->ecochk		= I915_READ(GAM_ECOCHK);
	s->bsd_hwsp		= I915_READ(BSD_HWS_PGA_GEN7);
	s->blt_hwsp		= I915_READ(BLT_HWS_PGA_GEN7);

	s->tlb_rd_addr		= I915_READ(GEN7_TLB_RD_ADDR);

	/* MBC 0x9024-0x91D0, 0x8500 */
	s->g3dctl		= I915_READ(VLV_G3DCTL);
	s->gsckgctl		= I915_READ(VLV_GSCKGCTL);
	s->mbctl		= I915_READ(GEN6_MBCTL);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	s->ucgctl1		= I915_READ(GEN6_UCGCTL1);
	s->ucgctl3		= I915_READ(GEN6_UCGCTL3);
	s->rcgctl1		= I915_READ(GEN6_RCGCTL1);
	s->rcgctl2		= I915_READ(GEN6_RCGCTL2);
	s->rstctl		= I915_READ(GEN6_RSTCTL);
	s->misccpctl		= I915_READ(GEN7_MISCCPCTL);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	s->gfxpause		= I915_READ(GEN6_GFXPAUSE);
	s->rpdeuhwtc		= I915_READ(GEN6_RPDEUHWTC);
	s->rpdeuc		= I915_READ(GEN6_RPDEUC);
	s->ecobus		= I915_READ(ECOBUS);
	s->pwrdwnupctl		= I915_READ(VLV_PWRDWNUPCTL);
	s->rp_down_timeout	= I915_READ(GEN6_RP_DOWN_TIMEOUT);
	s->rp_deucsw		= I915_READ(GEN6_RPDEUCSW);
	s->rcubmabdtmr		= I915_READ(GEN6_RCUBMABDTMR);
	s->rcedata		= I915_READ(VLV_RCEDATA);
	s->spare2gh		= I915_READ(VLV_SPAREG2H);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	s->gt_imr		= I915_READ(GTIMR);
	s->gt_ier		= I915_READ(GTIER);
	s->pm_imr		= I915_READ(GEN6_PMIMR);
	s->pm_ier		= I915_READ(GEN6_PMIER);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));

	/* GT SA CZ domain, 0x100000-0x138124 */
	s->tilectl		= I915_READ(TILECTL);
	s->gt_fifoctl		= I915_READ(GTFIFOCTL);
	s->gtlc_wake_ctrl	= I915_READ(VLV_GTLC_WAKE_CTRL);
	s->gtlc_survive		= I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	s->pmwgicz		= I915_READ(VLV_PMWGICZ);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	s->gu_ctl0		= I915_READ(VLV_GU_CTL0);
	s->gu_ctl1		= I915_READ(VLV_GU_CTL1);
	s->pcbr			= I915_READ(VLV_PCBR);
	s->clock_gate_dis2	= I915_READ(VLV_GUNIT_CLOCK_GATE2);

	/*
	 * Not saving any of:
	 * DFT,  0x9800-0x9EC0
	 * SARB, 0xB000-0xB1FC
	 * GAC,  0x5208-0x524C, 0x14000-0x14C000
	 */
}

static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
	u32 val;
	int i;

	/* GAM 0x4000-0x4770 */
	I915_WRITE(GEN7_WR_WATERMARK,	s->wr_watermark);
	I915_WRITE(GEN7_GFX_PRIO_CTRL,	s->gfx_prio_ctrl);
	I915_WRITE(ARB_MODE,		s->arb_mode | (0xffff << 16));
	I915_WRITE(GEN7_GFX_PEND_TLB0,	s->gfx_pend_tlb0);
	I915_WRITE(GEN7_GFX_PEND_TLB1,	s->gfx_pend_tlb1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]);

	I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
	I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);

	I915_WRITE(RENDER_HWS_PGA_GEN7,	s->render_hwsp);
	I915_WRITE(GAM_ECOCHK,		s->ecochk);
	I915_WRITE(BSD_HWS_PGA_GEN7,	s->bsd_hwsp);
	I915_WRITE(BLT_HWS_PGA_GEN7,	s->blt_hwsp);

	I915_WRITE(GEN7_TLB_RD_ADDR,	s->tlb_rd_addr);

	/* MBC 0x9024-0x91D0, 0x8500 */
	I915_WRITE(VLV_G3DCTL,		s->g3dctl);
	I915_WRITE(VLV_GSCKGCTL,	s->gsckgctl);
	I915_WRITE(GEN6_MBCTL,		s->mbctl);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	I915_WRITE(GEN6_UCGCTL1,	s->ucgctl1);
	I915_WRITE(GEN6_UCGCTL3,	s->ucgctl3);
	I915_WRITE(GEN6_RCGCTL1,	s->rcgctl1);
	I915_WRITE(GEN6_RCGCTL2,	s->rcgctl2);
	I915_WRITE(GEN6_RSTCTL,		s->rstctl);
	I915_WRITE(GEN7_MISCCPCTL,	s->misccpctl);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	I915_WRITE(GEN6_GFXPAUSE,	s->gfxpause);
	I915_WRITE(GEN6_RPDEUHWTC,	s->rpdeuhwtc);
	I915_WRITE(GEN6_RPDEUC,		s->rpdeuc);
	I915_WRITE(ECOBUS,		s->ecobus);
	I915_WRITE(VLV_PWRDWNUPCTL,	s->pwrdwnupctl);
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
	I915_WRITE(GEN6_RPDEUCSW,	s->rp_deucsw);
	I915_WRITE(GEN6_RCUBMABDTMR,	s->rcubmabdtmr);
	I915_WRITE(VLV_RCEDATA,		s->rcedata);
	I915_WRITE(VLV_SPAREG2H,	s->spare2gh);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	I915_WRITE(GTIMR,		s->gt_imr);
	I915_WRITE(GTIER,		s->gt_ier);
	I915_WRITE(GEN6_PMIMR,		s->pm_imr);
	I915_WRITE(GEN6_PMIER,		s->pm_ier);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);

	/* GT SA CZ domain, 0x100000-0x138124 */
	I915_WRITE(TILECTL,		s->tilectl);
	I915_WRITE(GTFIFOCTL,		s->gt_fifoctl);
	/*
	 * Preserve the GT allow wake and GFX force clock bit, they are not
	 * to be restored, as they are used to control the s0ix suspend/resume
	 * sequence by the caller.
	 */
	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= VLV_GTLC_ALLOWWAKEREQ;
	val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= VLV_GFX_CLK_FORCE_ON_BIT;
	val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	I915_WRITE(VLV_PMWGICZ,		s->pmwgicz);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	I915_WRITE(VLV_GU_CTL0,		s->gu_ctl0);
	I915_WRITE(VLV_GU_CTL1,		s->gu_ctl1);
	I915_WRITE(VLV_PCBR,		s->pcbr);
	I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
}

int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
{
	u32 val;
	int err;

#define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
	if (force_on)
		val |= VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	if (!force_on)
		return 0;

	err = wait_for(COND, 20);
	if (err)
		DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
			  I915_READ(VLV_GTLC_SURVIVABILITY_REG));

	return err;
#undef COND
}
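
/*
 * The COND/#undef pattern above (and in the helpers below) is the local
 * idiom for wait_for(), which polls its condition until it becomes true
 * or the timeout in milliseconds expires, returning 0 on success and
 * -ETIMEDOUT otherwise. A minimal sketch of a caller (SOME_REG and
 * SOME_READY_BIT are illustrative names only):
 *
 *	#define COND (I915_READ(SOME_REG) & SOME_READY_BIT)
 *	err = wait_for(COND, 20);	// poll for up to 20 ms
 *	#undef COND
 */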

static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
{
	u32 val;
	int err;

	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= ~VLV_GTLC_ALLOWWAKEREQ;
	if (allow)
		val |= VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
	POSTING_READ(VLV_GTLC_WAKE_CTRL);

#define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \
	      allow)
	err = wait_for(COND, 1);
	if (err)
		DRM_ERROR("timeout disabling GT waking\n");

	return err;
#undef COND
}

static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
				 bool wait_for_on)
{
	u32 mask;
	u32 val;
	int err;

	mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
	val = wait_for_on ? mask : 0;
#define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
	if (COND)
		return 0;

	DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
		      onoff(wait_for_on),
		      I915_READ(VLV_GTLC_PW_STATUS));

	/*
	 * RC6 transitioning can be delayed up to 2 msec (see
	 * valleyview_enable_rps), use 3 msec for safety.
	 */
	err = wait_for(COND, 3);
	if (err)
		DRM_ERROR("timeout waiting for GT wells to go %s\n",
			  onoff(wait_for_on));

	return err;
#undef COND
}

static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
{
	if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
		return;

	DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n");
	I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
}

static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
{
	u32 mask;
	int err;

	/*
	 * Bspec defines the following GT well on flags as debug only, so
	 * don't treat them as hard failures.
	 */
	(void)vlv_wait_for_gt_wells(dev_priv, false);

	mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
	WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);

	vlv_check_no_gt_access(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, true);
	if (err)
		goto err1;

	err = vlv_allow_gt_wake(dev_priv, false);
	if (err)
		goto err2;

	if (!IS_CHERRYVIEW(dev_priv->dev))
		vlv_save_gunit_s0ix_state(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, false);
	if (err)
		goto err2;

	return 0;

err2:
	/* For safety always re-enable waking and disable gfx clock forcing */
	vlv_allow_gt_wake(dev_priv, true);
err1:
	vlv_force_gfx_clock(dev_priv, false);

	return err;
}
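
/*
 * Ordering summary of the s0ix entry sequence above, for reference: force
 * the gfx clock on so the Gunit registers can be read reliably, block GT
 * wake requests, save the Gunit state (VLV only; CHV retains it in
 * hardware), then release the forced clock. vlv_resume_prepare() below
 * undoes the same steps in reverse.
 */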

static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
			      bool rpm_resume)
{
	struct drm_device *dev = dev_priv->dev;
	int err;
	int ret;

	/*
	 * If any of the steps fail just try to continue, that's the best we
	 * can do at this point. Return the first error code (which will also
	 * leave RPM permanently disabled).
	 */
	ret = vlv_force_gfx_clock(dev_priv, true);

	if (!IS_CHERRYVIEW(dev_priv->dev))
		vlv_restore_gunit_s0ix_state(dev_priv);

	err = vlv_allow_gt_wake(dev_priv, true);
	if (!ret)
		ret = err;

	err = vlv_force_gfx_clock(dev_priv, false);
	if (!ret)
		ret = err;

	vlv_check_no_gt_access(dev_priv);

	if (rpm_resume) {
		intel_init_clock_gating(dev);
		i915_gem_restore_fences(dev);
	}

	return ret;
}

static int intel_runtime_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
		return -ENODEV;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
		return -ENODEV;

	DRM_DEBUG_KMS("Suspending device\n");

	/*
	 * We could deadlock here in case another thread holding struct_mutex
	 * calls RPM suspend concurrently, since the RPM suspend will wait
	 * first for this RPM suspend to finish. In this case the concurrent
	 * RPM resume will be followed by its RPM suspend counterpart. Still
	 * for consistency return -EAGAIN, which will reschedule this suspend.
	 */
	if (!mutex_trylock(&dev->struct_mutex)) {
		DRM_DEBUG_KMS("device lock contention, deferring suspend\n");
		/*
		 * Bump the expiration timestamp, otherwise the suspend won't
		 * be rescheduled.
		 */
		pm_runtime_mark_last_busy(device);

		return -EAGAIN;
	}

	disable_rpm_wakeref_asserts(dev_priv);

	/*
	 * We are safe here against re-faults, since the fault handler takes
	 * an RPM reference.
	 */
	i915_gem_release_all_mmaps(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);

	intel_guc_suspend(dev);

	intel_suspend_gt_powersave(dev);
	intel_runtime_pm_disable_interrupts(dev_priv);

	ret = intel_suspend_complete(dev_priv);
	if (ret) {
		DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
		intel_runtime_pm_enable_interrupts(dev_priv);

		enable_rpm_wakeref_asserts(dev_priv);

		return ret;
	}

	intel_uncore_forcewake_reset(dev, false);

	enable_rpm_wakeref_asserts(dev_priv);
	WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));

	if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
		DRM_ERROR("Unclaimed access detected prior to suspending\n");

	dev_priv->pm.suspended = true;

	/*
	 * FIXME: We really should find a document that references the
	 * arguments used below!
	 */
	if (IS_BROADWELL(dev)) {
		/*
		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
		 * being detected, and the call we do at intel_runtime_resume()
		 * won't be able to restore them. Since PCI_D3hot matches the
		 * actual specification and appears to be working, use it.
		 */
		intel_opregion_notify_adapter(dev, PCI_D3hot);
	} else {
		/*
		 * current versions of firmware which depend on this opregion
		 * notification have repurposed the D1 definition to mean
		 * "runtime suspended" vs. what you would normally expect (D3)
		 * to distinguish it from notifications that might be sent via
		 * the suspend path.
		 */
		intel_opregion_notify_adapter(dev, PCI_D1);
	}

	assert_forcewakes_inactive(dev_priv);

	DRM_DEBUG_KMS("Device suspended\n");
	return 0;
}

static int intel_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
		return -ENODEV;

	DRM_DEBUG_KMS("Resuming device\n");

	WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
	disable_rpm_wakeref_asserts(dev_priv);

	intel_opregion_notify_adapter(dev, PCI_D0);
	dev_priv->pm.suspended = false;
	if (intel_uncore_unclaimed_mmio(dev_priv))
		DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");

	intel_guc_resume(dev);

	if (IS_GEN6(dev_priv))
		intel_init_pch_refclk(dev);

	if (IS_BROXTON(dev))
		ret = bxt_resume_prepare(dev_priv);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_disable_pc8(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_resume_prepare(dev_priv, true);

	/*
	 * No point of rolling back things in case of an error, as the best
	 * we can do is to hope that things will still work (and disable RPM).
	 */
	i915_gem_init_swizzling(dev);
	gen6_update_ring_freq(dev);

	intel_runtime_pm_enable_interrupts(dev_priv);

	/*
	 * On VLV/CHV display interrupts are part of the display
	 * power well, so hpd is reinitialized from there. For
	 * everyone else do it here.
	 */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		intel_hpd_init(dev_priv);

	intel_enable_gt_powersave(dev);

	enable_rpm_wakeref_asserts(dev_priv);

	if (ret)
		DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
	else
		DRM_DEBUG_KMS("Device resumed\n");

	return ret;
}

/*
 * This function implements common functionality of runtime and system
 * suspend sequence.
 */
static int intel_suspend_complete(struct drm_i915_private *dev_priv)
{
	int ret;

	if (IS_BROXTON(dev_priv))
		ret = bxt_suspend_complete(dev_priv);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		ret = hsw_suspend_complete(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_suspend_complete(dev_priv);
	else
		ret = 0;

	return ret;
}
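
/*
 * The per-platform dispatch above is mirrored on the resume side:
 * bxt_suspend_complete() pairs with bxt_resume_prepare(),
 * hsw_suspend_complete() with hsw_disable_pc8(), and
 * vlv_suspend_complete() with vlv_resume_prepare() (see
 * i915_drm_resume_early() and intel_runtime_resume()).
 */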

static const struct dev_pm_ops i915_pm_ops = {
	/*
	 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
	 * PMSG_RESUME]
	 */
	.suspend = i915_pm_suspend,
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
	.resume = i915_pm_resume,

	/*
	 * S4 event handlers
	 * @freeze, @freeze_late    : called (1) before creating the
	 *                            hibernation image [PMSG_FREEZE] and
	 *                            (2) after rebooting, before restoring
	 *                            the image [PMSG_QUIESCE]
	 * @thaw, @thaw_early       : called (1) after creating the hibernation
	 *                            image, before writing it [PMSG_THAW]
	 *                            and (2) after failing to create or
	 *                            restore the image [PMSG_RECOVER]
	 * @poweroff, @poweroff_late: called after writing the hibernation
	 *                            image, before rebooting [PMSG_HIBERNATE]
	 * @restore, @restore_early : called after rebooting and restoring the
	 *                            hibernation image [PMSG_RESTORE]
	 */
	.freeze = i915_pm_suspend,
	.freeze_late = i915_pm_suspend_late,
	.thaw_early = i915_pm_resume_early,
	.thaw = i915_pm_resume,
	.poweroff = i915_pm_suspend,
	.poweroff_late = i915_pm_poweroff_late,
	.restore_early = i915_pm_resume_early,
	.restore = i915_pm_resume,

	/* S0ix (via runtime suspend) event handlers */
	.runtime_suspend = intel_runtime_suspend,
	.runtime_resume = intel_runtime_resume,
};

static const struct vm_operations_struct i915_gem_vm_ops = {
	.fault = i915_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
#ifdef CONFIG_COMPAT
	.compat_ioctl = i915_compat_ioctl,
#endif
	.llseek = noop_llseek,
};

static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
	    DRIVER_RENDER | DRIVER_MODESET,
	.load = i915_driver_load,
	.unload = i915_driver_unload,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.preclose = i915_driver_preclose,
	.postclose = i915_driver_postclose,
	.set_busid = drm_pci_set_busid,

#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = i915_debugfs_init,
	.debugfs_cleanup = i915_debugfs_cleanup,
#endif
	.gem_free_object = i915_gem_free_object,
	.gem_vm_ops = &i915_gem_vm_ops,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = i915_gem_prime_export,
	.gem_prime_import = i915_gem_prime_import,

	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_mmap_gtt,
	.dumb_destroy = drm_gem_dumb_destroy,
	.ioctls = i915_ioctls,
	.fops = &i915_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

static struct pci_driver i915_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = i915_pci_probe,
	.remove = i915_pci_remove,
	.driver.pm = &i915_pm_ops,
};

static int __init i915_init(void)
{
	driver.num_ioctls = i915_max_ioctl;

	/*
	 * Enable KMS by default, unless explicitly overridden by
	 * either the i915.modeset parameter or by the
	 * vga_text_mode_force boot option.
	 */

	if (i915.modeset == 0)
		driver.driver_features &= ~DRIVER_MODESET;

#ifdef CONFIG_VGA_CONSOLE
	if (vgacon_text_force() && i915.modeset == -1)
		driver.driver_features &= ~DRIVER_MODESET;
#endif

	if (!(driver.driver_features & DRIVER_MODESET)) {
		/* Silently fail loading to not upset userspace. */
		DRM_DEBUG_DRIVER("KMS and UMS disabled.\n");
		return 0;
	}

	if (i915.nuclear_pageflip)
		driver.driver_features |= DRIVER_ATOMIC;

	return drm_pci_init(&driver, &i915_pci_driver);
}
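
/*
 * For reference, the resulting module parameter behaviour: i915.modeset
 * is a tri-state (-1 auto, 0 off, 1 on), so e.g. booting with
 * i915.modeset=0 (or with vga_text_mode_force while modeset is left at
 * -1) clears DRIVER_MODESET above and the driver silently refuses to
 * load.
 */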

static void __exit i915_exit(void)
{
	if (!(driver.driver_features & DRIVER_MODESET))
		return; /* Never loaded a driver. */

	drm_pci_exit(&driver, &i915_pci_driver);
}

module_init(i915_init);
module_exit(i915_exit);

MODULE_AUTHOR("Tungsten Graphics, Inc.");
MODULE_AUTHOR("Intel Corporation");

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");