/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "intel_drv.h"
#include "i915_vgpu.h"

#include <asm/iosf_mbi.h>
#include <linux/pm_runtime.h>

#define FORCEWAKE_ACK_TIMEOUT_MS 50
#define GT_FIFO_TIMEOUT_MS	 10

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))
static const char * const forcewake_domain_names[] = {
	"render",
	"blitter",
	"media",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	WARN_ON(id);

	return "unknown";
}
static inline void
fw_domain_reset(struct drm_i915_private *i915,
		const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(i915, d->reg_set, i915->uncore.fw_reset);
}

static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	d->wake_count++;
	hrtimer_start_range_ns(&d->timer,
			       NSEC_PER_MSEC,
			       NSEC_PER_MSEC,
			       HRTIMER_MODE_REL);
}
static inline int
__wait_for_ack(const struct drm_i915_private *i915,
	       const struct intel_uncore_forcewake_domain *d,
	       const u32 ack,
	       const u32 value)
{
	return wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) & ack) == value,
			       FORCEWAKE_ACK_TIMEOUT_MS);
}

static inline int
wait_ack_clear(const struct drm_i915_private *i915,
	       const struct intel_uncore_forcewake_domain *d,
	       const u32 ack)
{
	return __wait_for_ack(i915, d, ack, 0);
}

static inline int
wait_ack_set(const struct drm_i915_private *i915,
	     const struct intel_uncore_forcewake_domain *d,
	     const u32 ack)
{
	return __wait_for_ack(i915, d, ack, ack);
}

static inline void
fw_domain_wait_ack_clear(const struct drm_i915_private *i915,
			 const struct intel_uncore_forcewake_domain *d)
{
	if (wait_ack_clear(i915, d, FORCEWAKE_KERNEL))
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}
enum ack_type {
	ACK_CLEAR = 0,
	ACK_SET
};

static int
fw_domain_wait_ack_with_fallback(const struct drm_i915_private *i915,
				 const struct intel_uncore_forcewake_domain *d,
				 const enum ack_type type)
{
	const u32 ack_bit = FORCEWAKE_KERNEL;
	const u32 value = type == ACK_SET ? ack_bit : 0;
	unsigned int pass;
	bool ack_detected;

	/*
	 * There is a possibility of driver's wake request colliding
	 * with hardware's own wake requests and that can cause
	 * hardware to not deliver the driver's ack message.
	 *
	 * Use a fallback bit toggle to kick the gpu state machine
	 * in the hope that the original ack will be delivered along with
	 * the fallback ack.
	 *
	 * This workaround is described in HSDES #1604254524
	 */

	pass = 1;
	do {
		wait_ack_clear(i915, d, FORCEWAKE_KERNEL_FALLBACK);

		__raw_i915_write32(i915, d->reg_set,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL_FALLBACK));
		/* Give gt some time to relax before the polling frenzy */
		udelay(10 * pass);
		wait_ack_set(i915, d, FORCEWAKE_KERNEL_FALLBACK);

		ack_detected = (__raw_i915_read32(i915, d->reg_ack) & ack_bit) == value;

		__raw_i915_write32(i915, d->reg_set,
				   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL_FALLBACK));
	} while (!ack_detected && pass++ < 10);

	DRM_DEBUG_DRIVER("%s had to use fallback to %s ack, 0x%x (passes %u)\n",
			 intel_uncore_forcewake_domain_to_str(d->id),
			 type == ACK_SET ? "set" : "clear",
			 __raw_i915_read32(i915, d->reg_ack),
			 pass);

	return ack_detected ? 0 : -ETIMEDOUT;
}
static inline void
fw_domain_wait_ack_clear_fallback(const struct drm_i915_private *i915,
				  const struct intel_uncore_forcewake_domain *d)
{
	if (likely(!wait_ack_clear(i915, d, FORCEWAKE_KERNEL)))
		return;

	if (fw_domain_wait_ack_with_fallback(i915, d, ACK_CLEAR))
		fw_domain_wait_ack_clear(i915, d);
}

static inline void
fw_domain_get(struct drm_i915_private *i915,
	      const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(i915, d->reg_set, i915->uncore.fw_set);
}

static inline void
fw_domain_wait_ack_set(const struct drm_i915_private *i915,
		       const struct intel_uncore_forcewake_domain *d)
{
	if (wait_ack_set(i915, d, FORCEWAKE_KERNEL))
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_wait_ack_set_fallback(const struct drm_i915_private *i915,
				const struct intel_uncore_forcewake_domain *d)
{
	if (likely(!wait_ack_set(i915, d, FORCEWAKE_KERNEL)))
		return;

	if (fw_domain_wait_ack_with_fallback(i915, d, ACK_SET))
		fw_domain_wait_ack_set(i915, d);
}

static inline void
fw_domain_put(const struct drm_i915_private *i915,
	      const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(i915, d->reg_set, i915->uncore.fw_clear);
}
static void
fw_domains_get(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);

	for_each_fw_domain_masked(d, fw_domains, i915, tmp) {
		fw_domain_wait_ack_clear(i915, d);
		fw_domain_get(i915, d);
	}

	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
		fw_domain_wait_ack_set(i915, d);

	i915->uncore.fw_domains_active |= fw_domains;
}

static void
fw_domains_get_with_fallback(struct drm_i915_private *i915,
			     enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);

	for_each_fw_domain_masked(d, fw_domains, i915, tmp) {
		fw_domain_wait_ack_clear_fallback(i915, d);
		fw_domain_get(i915, d);
	}

	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
		fw_domain_wait_ack_set_fallback(i915, d);

	i915->uncore.fw_domains_active |= fw_domains;
}

static void
fw_domains_put(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);

	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
		fw_domain_put(i915, d);

	i915->uncore.fw_domains_active &= ~fw_domains;
}

static void
fw_domains_reset(struct drm_i915_private *i915,
		 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	if (!fw_domains)
		return;

	GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);

	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
		fw_domain_reset(i915, d);
}
static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get(dev_priv, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
{
	u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;
}
static void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	u32 n;

	/* On VLV, FIFO will be shared by both SW and HW.
	 * So, we need to read the FREE_ENTRIES every time */
	if (IS_VALLEYVIEW(dev_priv))
		n = fifo_free_entries(dev_priv);
	else
		n = dev_priv->uncore.fifo_count;

	if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
		if (wait_for_atomic((n = fifo_free_entries(dev_priv)) >
				    GT_FIFO_NUM_RESERVED_ENTRIES,
				    GT_FIFO_TIMEOUT_MS)) {
			DRM_DEBUG("GT_FIFO timeout, entries: %u\n", n);
			return;
		}
	}

	dev_priv->uncore.fifo_count = n - 1;
}
static enum hrtimer_restart
intel_uncore_fw_release_timer(struct hrtimer *timer)
{
	struct intel_uncore_forcewake_domain *domain =
		container_of(timer, struct intel_uncore_forcewake_domain, timer);
	struct drm_i915_private *dev_priv =
		container_of(domain, struct drm_i915_private, uncore.fw_domain[domain->id]);
	unsigned long irqflags;

	assert_rpm_device_not_suspended(dev_priv);

	if (xchg(&domain->active, false))
		return HRTIMER_RESTART;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (WARN_ON(domain->wake_count == 0))
		domain->wake_count++;

	if (--domain->wake_count == 0)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return HRTIMER_NORESTART;
}
/* Note callers must have acquired the PUNIT->PMIC bus, before calling this. */
static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
					 bool restore)
{
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domains fw, active_domains;

	iosf_mbi_assert_punit_acquired();

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		unsigned int tmp;

		active_domains = 0;

		for_each_fw_domain(domain, dev_priv, tmp) {
			smp_store_mb(domain->active, false);
			if (hrtimer_cancel(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer(&domain->timer);
		}

		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

		for_each_fw_domain(domain, dev_priv, tmp) {
			if (hrtimer_active(&domain->timer))
				active_domains |= domain->mask;
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
			break;
		}

		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
		cond_resched();
	}

	WARN_ON(active_domains);

	fw = dev_priv->uncore.fw_domains_active;
	if (fw)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);

	fw_domains_reset(dev_priv, dev_priv->uncore.fw_domains);

	if (restore) { /* If reset with a user forcewake, try to restore */
		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
			dev_priv->uncore.fifo_count =
				fifo_free_entries(dev_priv);
	}

	if (!restore)
		assert_forcewakes_inactive(dev_priv);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static u64 gen9_edram_size(struct drm_i915_private *dev_priv)
{
	const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
	const unsigned int sets[4] = { 1, 1, 2, 2 };
	const u32 cap = dev_priv->edram_cap;

	return EDRAM_NUM_BANKS(cap) *
		ways[EDRAM_WAYS_IDX(cap)] *
		sets[EDRAM_SETS_IDX(cap)] *
		1024 * 1024;
}

u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv)
{
	if (!HAS_EDRAM(dev_priv))
		return 0;

	/* The needed capability bits for size calculation
	 * are not there with pre gen9 so return 128MB always.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		return 128 * 1024 * 1024;

	return gen9_edram_size(dev_priv);
}
static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv)
{
	if (IS_HASWELL(dev_priv) ||
	    IS_BROADWELL(dev_priv) ||
	    INTEL_GEN(dev_priv) >= 9) {
		dev_priv->edram_cap = __raw_i915_read32(dev_priv,
							HSW_EDRAM_CAP);

		/* NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
	} else {
		dev_priv->edram_cap = 0;
	}

	if (HAS_EDRAM(dev_priv))
		DRM_INFO("Found %lluMB of eDRAM\n",
			 intel_uncore_edram_size(dev_priv) / (1024 * 1024));
}
static bool
fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	u32 dbg;

	dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
		return false;

	__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	return true;
}

static bool
vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	u32 cer;

	cer = __raw_i915_read32(dev_priv, CLAIM_ER);
	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
		return false;

	__raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);

	return true;
}

static bool
gen6_check_for_fifo_debug(struct drm_i915_private *dev_priv)
{
	u32 fifodbg;

	fifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);

	if (unlikely(fifodbg)) {
		DRM_DEBUG_DRIVER("GTFIFODBG = 0x%08x\n", fifodbg);
		__raw_i915_write32(dev_priv, GTFIFODBG, fifodbg);
	}

	return fifodbg;
}

static bool
check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	bool ret = false;

	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
		ret |= fpga_check_for_unclaimed_mmio(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret |= vlv_check_for_unclaimed_mmio(dev_priv);

	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
		ret |= gen6_check_for_fifo_debug(dev_priv);

	return ret;
}
static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
					  bool restore_forcewake)
{
	/* clear out unclaimed reg detection bit */
	if (check_for_unclaimed_mmio(dev_priv))
		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(dev_priv)) {
		__raw_i915_write32(dev_priv, GTFIFOCTL,
				   __raw_i915_read32(dev_priv, GTFIFOCTL) |
				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				   GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	iosf_mbi_punit_acquire();
	intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
	iosf_mbi_punit_release();
}

void intel_uncore_suspend(struct drm_i915_private *dev_priv)
{
	iosf_mbi_punit_acquire();
	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
		&dev_priv->uncore.pmic_bus_access_nb);
	intel_uncore_forcewake_reset(dev_priv, false);
	iosf_mbi_punit_release();
}
void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
{
	__intel_uncore_early_sanitize(dev_priv, true);
	iosf_mbi_register_pmic_bus_access_notifier(
		&dev_priv->uncore.pmic_bus_access_nb);
	i915_check_and_clear_faults(dev_priv);
}

void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv)
{
	iosf_mbi_register_pmic_bus_access_notifier(
		&dev_priv->uncore.pmic_bus_access_nb);
}

void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
{
	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_sanitize_gt_powersave(dev_priv);
}
static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) {
		if (domain->wake_count++) {
			fw_domains &= ~domain->mask;
			domain->active = true;
		}
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}
/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However if some sequence requires the GT to not power down a particular
 * forcewake domain, this function should be called at the beginning of the
 * sequence. Subsequently the reference should be dropped by a symmetric
 * call to intel_uncore_forcewake_put(). Usually the caller wants all the
 * domains to be kept awake, in which case @fw_domains would be FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_get(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
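
/*
 * Illustrative usage pattern (a sketch, not code from this file): a raw
 * register sequence that must keep the GT awake brackets the accesses with
 * the get/put pair described above; the register names are placeholders.
 *
 *	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 *	... I915_READ_FW()/I915_WRITE_FW() sequence ...
 *	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 */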
/**
 * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
 * @dev_priv: i915 device instance
 *
 * This function is a wrapper around intel_uncore_forcewake_get() to acquire
 * the GT powerwell and in the process disable our debugging for the
 * duration of userspace's bypass.
 */
void intel_uncore_forcewake_user_get(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->uncore.lock);
	if (!dev_priv->uncore.user_forcewake.count++) {
		intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);

		/* Save and disable mmio debugging for the user bypass */
		dev_priv->uncore.user_forcewake.saved_mmio_check =
			dev_priv->uncore.unclaimed_mmio_check;
		dev_priv->uncore.user_forcewake.saved_mmio_debug =
			i915_modparams.mmio_debug;

		dev_priv->uncore.unclaimed_mmio_check = 0;
		i915_modparams.mmio_debug = 0;
	}
	spin_unlock_irq(&dev_priv->uncore.lock);
}
/**
 * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
 * @dev_priv: i915 device instance
 *
 * This function complements intel_uncore_forcewake_user_get() and releases
 * the GT powerwell taken on behalf of the userspace bypass.
 */
void intel_uncore_forcewake_user_put(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->uncore.lock);
	if (!--dev_priv->uncore.user_forcewake.count) {
		if (intel_uncore_unclaimed_mmio(dev_priv))
			dev_info(dev_priv->drm.dev,
				 "Invalid mmio detected during user access\n");

		dev_priv->uncore.unclaimed_mmio_check =
			dev_priv->uncore.user_forcewake.saved_mmio_check;
		i915_modparams.mmio_debug =
			dev_priv->uncore.user_forcewake.saved_mmio_debug;

		intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
	}
	spin_unlock_irq(&dev_priv->uncore.lock);
}
/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	__intel_uncore_forcewake_get(dev_priv, fw_domains);
}
static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) {
		if (WARN_ON(domain->wake_count == 0))
			continue;

		if (--domain->wake_count) {
			domain->active = true;
			continue;
		}

		fw_domain_arm_timer(domain);
	}
}
/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references
 *
 * This function drops the device-level forcewakes for specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_put(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	__intel_uncore_forcewake_put(dev_priv, fw_domains);
}
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	WARN(dev_priv->uncore.fw_domains_active,
	     "Expected all fw_domains to be inactive, but %08x are still on\n",
	     dev_priv->uncore.fw_domains_active);
}

void assert_forcewakes_active(struct drm_i915_private *dev_priv,
			      enum forcewake_domains fw_domains)
{
	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	assert_rpm_wakelock_held(dev_priv);

	fw_domains &= dev_priv->uncore.fw_domains;
	WARN(fw_domains & ~dev_priv->uncore.fw_domains_active,
	     "Expected %08x fw_domains to be active, but %08x are off\n",
	     fw_domains, fw_domains & ~dev_priv->uncore.fw_domains_active);
}
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)

#define __gen6_reg_read_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})
static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
{
	if (offset < entry->start)
		return -1;
	else if (offset > entry->end)
		return 1;
	else
		return 0;
}

/* Copied and "macroized" from lib/bsearch.c */
#define BSEARCH(key, base, num, cmp) ({                                 \
	unsigned int start__ = 0, end__ = (num);                        \
	typeof(base) result__ = NULL;                                   \
	while (start__ < end__) {                                       \
		unsigned int mid__ = start__ + (end__ - start__) / 2;   \
		int ret__ = (cmp)((key), (base) + mid__);               \
		if (ret__ < 0) {                                        \
			end__ = mid__;                                  \
		} else if (ret__ > 0) {                                 \
			start__ = mid__ + 1;                            \
		} else {                                                \
			result__ = (base) + mid__;                      \
			break;                                          \
		}                                                       \
	}                                                               \
	result__;                                                       \
})
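
/*
 * Illustrative use of BSEARCH (a sketch of what find_fw_domain() below does):
 * look up an MMIO offset in a table sorted by offset range, using a
 * comparator with the usual <0/0/>0 bsearch() semantics.
 *
 *	entry = BSEARCH(offset, table, num_entries, fw_range_cmp);
 *	if (entry)
 *		... entry->domains is the forcewake mask for that offset ...
 */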
static enum forcewake_domains
find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
{
	const struct intel_forcewake_range *entry;

	entry = BSEARCH(offset,
			dev_priv->uncore.fw_domains_table,
			dev_priv->uncore.fw_domains_table_entries,
			fw_range_cmp);

	if (!entry)
		return 0;

	WARN(entry->domains & ~dev_priv->uncore.fw_domains,
	     "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
	     entry->domains & ~dev_priv->uncore.fw_domains, offset);

	return entry->domains;
}
#define GEN_FW_RANGE(s, e, d) \
	{ .start = (s), .end = (e), .domains = (d) }

#define HAS_FWTABLE(dev_priv) \
	(INTEL_GEN(dev_priv) >= 9 || \
	 IS_CHERRYVIEW(dev_priv) || \
	 IS_VALLEYVIEW(dev_priv))

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __vlv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_read_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset))) \
		__fwd = find_fw_domain(dev_priv, offset); \
	__fwd; \
})
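
/*
 * Example (purely illustrative, offsets taken from __vlv_fw_ranges above):
 * on VLV a read at 0x12080 falls inside the 0x12000-0x13fff range and so
 * maps to FORCEWAKE_MEDIA, while an offset outside every listed range maps
 * to no forcewake domain at all (__fwd stays 0).
 */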
/* *Must* be sorted by offset! See intel_shadow_table_check(). */
static const i915_reg_t gen8_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),	/* 0x2000 (base) */
	GEN6_RPNSWREQ,			/* 0xA008 */
	GEN6_RC_VIDEO_FREQ,		/* 0xA00C */
	RING_TAIL(GEN6_BSD_RING_BASE),	/* 0x12000 (base) */
	RING_TAIL(VEBOX_RING_BASE),	/* 0x1a000 (base) */
	RING_TAIL(BLT_RING_BASE),	/* 0x22000 (base) */
	/* TODO: Other registers are not yet used */
};

static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
{
	u32 offset = i915_mmio_reg_offset(*reg);

	if (key < offset)
		return -1;
	else if (key > offset)
		return 1;
	else
		return 0;
}

static bool is_gen8_shadowed(u32 offset)
{
	const i915_reg_t *regs = gen8_shadowed_regs;

	return BSEARCH(offset, regs, ARRAY_SIZE(gen8_shadowed_regs),
		       mmio_reg_cmp);
}

#define __gen8_reg_write_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})
/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __chv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_write_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
		__fwd = find_fw_domain(dev_priv, offset); \
	__fwd; \
})
/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen9_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}
static void
__unclaimed_reg_debug(struct drm_i915_private *dev_priv,
		      const i915_reg_t reg,
		      const bool read,
		      const bool before)
{
	if (WARN(check_for_unclaimed_mmio(dev_priv) && !before,
		 "Unclaimed %s register 0x%x\n",
		 read ? "read from" : "write to",
		 i915_mmio_reg_offset(reg)))
		/* Only report the first N failures */
		i915_modparams.mmio_debug--;
}

static inline void
unclaimed_reg_debug(struct drm_i915_private *dev_priv,
		    const i915_reg_t reg,
		    const bool read,
		    const bool before)
{
	if (likely(!i915_modparams.mmio_debug))
		return;

	__unclaimed_reg_debug(dev_priv, reg, read, before);
}
#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read
#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER
#define GEN6_READ_HEADER(x) \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	unclaimed_reg_debug(dev_priv, reg, true, true)

#define GEN6_READ_FOOTER \
	unclaimed_reg_debug(dev_priv, reg, true, false); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val
static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~dev_priv->uncore.fw_domains);

	for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp)
		fw_domain_arm_timer(domain);

	dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}
static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)
{
	if (WARN_ON(!fw_domains))
		return;

	/* Turn on all requested but inactive supported forcewake domains. */
	fw_domains &= dev_priv->uncore.fw_domains;
	fw_domains &= ~dev_priv->uncore.fw_domains_active;

	if (fw_domains)
		___force_wake_auto(dev_priv, fw_domains);
}
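
/*
 * Sketch of the automatic forcewake flow used by the mmio accessors defined
 * below (illustrative summary, not additional code): a read such as
 * fwtable_read32() computes the required domains from the register offset,
 * calls __force_wake_auto() to wake any that are currently idle, performs the
 * raw access, and relies on the per-domain hrtimer to drop the reference in
 * intel_uncore_fw_release_timer() once the domain has gone unused.
 */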
#define __gen_read(func, x) \
static u##x \
func##_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __##func##_reg_read_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}
#define __gen6_read(x) __gen_read(gen6, x)
#define __fwtable_read(x) __gen_read(fwtable, x)

__fwtable_read(8)
__fwtable_read(16)
__fwtable_read(32)
__fwtable_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)

#undef __fwtable_read
#undef __gen6_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER
#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv); \

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)

#undef __gen5_write
#undef __gen2_write
#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER
#define GEN6_WRITE_HEADER \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	unclaimed_reg_debug(dev_priv, reg, false, true)

#define GEN6_WRITE_FOOTER \
	unclaimed_reg_debug(dev_priv, reg, false, false); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__gen6_gt_wait_for_fifo(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

#define __gen_write(func, x) \
static void \
func##_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __##func##_reg_write_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}
#define __gen8_write(x) __gen_write(gen8, x)
#define __fwtable_write(x) __gen_write(fwtable, x)

__fwtable_write(8)
__fwtable_write(16)
__fwtable_write(32)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)

#undef __fwtable_write
#undef __gen8_write
#undef __gen6_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER
#define ASSIGN_WRITE_MMIO_VFUNCS(i915, x) \
do { \
	(i915)->uncore.funcs.mmio_writeb = x##_write8; \
	(i915)->uncore.funcs.mmio_writew = x##_write16; \
	(i915)->uncore.funcs.mmio_writel = x##_write32; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(i915, x) \
do { \
	(i915)->uncore.funcs.mmio_readb = x##_read8; \
	(i915)->uncore.funcs.mmio_readw = x##_read16; \
	(i915)->uncore.funcs.mmio_readl = x##_read32; \
	(i915)->uncore.funcs.mmio_readq = x##_read64; \
} while (0)
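
/*
 * These are used in intel_uncore_init() below; for example
 * ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable) wires fwtable_read{8,16,32,64}
 * up as the mmio read vfuncs for platforms with a forcewake range table.
 */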
static void fw_domain_init(struct drm_i915_private *dev_priv,
			   enum forcewake_domain_id domain_id,
			   i915_reg_t reg_set,
			   i915_reg_t reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
		return;

	d = &dev_priv->uncore.fw_domain[domain_id];

	WARN_ON(d->wake_count);

	WARN_ON(!i915_mmio_reg_valid(reg_set));
	WARN_ON(!i915_mmio_reg_valid(reg_ack));

	d->wake_count = 0;
	d->reg_set = reg_set;
	d->reg_ack = reg_ack;

	d->id = domain_id;

	BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
	BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
	BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));

	d->mask = BIT(domain_id);

	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	d->timer.function = intel_uncore_fw_release_timer;

	dev_priv->uncore.fw_domains |= BIT(domain_id);

	fw_domain_reset(dev_priv, d);
}
static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) <= 5 || intel_vgpu_active(dev_priv))
		return;

	if (IS_GEN6(dev_priv)) {
		dev_priv->uncore.fw_reset = 0;
		dev_priv->uncore.fw_set = FORCEWAKE_KERNEL;
		dev_priv->uncore.fw_clear = 0;
	} else {
		/* WaRsClearFWBitsAtReset:bdw,skl */
		dev_priv->uncore.fw_reset = _MASKED_BIT_DISABLE(0xffff);
		dev_priv->uncore.fw_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
		dev_priv->uncore.fw_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
	}

	if (INTEL_GEN(dev_priv) >= 9) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_fallback;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(dev_priv)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;

		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case of MT access is
		 * not working. In this stage we don't know which flavour this
		 * ivb is, so it is better to reset also the gen6 fw registers
		 * before the ecobus check.
		 */

		__raw_i915_write32(dev_priv, FORCEWAKE, 0);
		__raw_posting_read(dev_priv, ECOBUS);

		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);

		spin_lock_irq(&dev_priv->uncore.lock);
		fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_RENDER);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		fw_domains_put(dev_priv, FORCEWAKE_RENDER);
		spin_unlock_irq(&dev_priv->uncore.lock);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (IS_GEN6(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

	/* All future platforms are expected to require complex power gating */
	WARN_ON(dev_priv->uncore.fw_domains == 0);
}
#define ASSIGN_FW_DOMAINS_TABLE(d) \
{ \
	dev_priv->uncore.fw_domains_table = \
		(struct intel_forcewake_range *)(d); \
	dev_priv->uncore.fw_domains_table_entries = ARRAY_SIZE((d)); \
}
static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
					 unsigned long action, void *data)
{
	struct drm_i915_private *dev_priv = container_of(nb,
			struct drm_i915_private, uncore.pmic_bus_access_nb);

	switch (action) {
	case MBI_PMIC_BUS_ACCESS_BEGIN:
		/*
		 * forcewake all now to make sure that we don't need to do a
		 * forcewake later which on systems where this notifier gets
		 * called requires the punit to access to the shared pmic i2c
		 * bus, which will be busy after this notification, leading to:
		 * "render: timed out waiting for forcewake ack request."
		 * errors.
		 *
		 * The notifier is unregistered during intel_runtime_suspend(),
		 * so it's ok to access the HW here without holding a RPM
		 * wake reference -> disable wakeref asserts for the time of
		 * the access.
		 */
		disable_rpm_wakeref_asserts(dev_priv);
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
		enable_rpm_wakeref_asserts(dev_priv);
		break;
	case MBI_PMIC_BUS_ACCESS_END:
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
		break;
	}

	return NOTIFY_OK;
}
void intel_uncore_init(struct drm_i915_private *dev_priv)
{
	i915_check_vgpu(dev_priv);

	intel_uncore_edram_detect(dev_priv);
	intel_uncore_fw_domains_init(dev_priv);
	__intel_uncore_early_sanitize(dev_priv, false);

	dev_priv->uncore.unclaimed_mmio_check = 1;
	dev_priv->uncore.pmic_bus_access_nb.notifier_call =
		i915_pmic_bus_access_notifier;

	if (IS_GEN(dev_priv, 2, 4) || intel_vgpu_active(dev_priv)) {
		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen2);
		ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen2);
	} else if (IS_GEN5(dev_priv)) {
		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen5);
		ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen5);
	} else if (IS_GEN(dev_priv, 6, 7)) {
		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen6);

		if (IS_VALLEYVIEW(dev_priv)) {
			ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges);
			ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
		}
	} else if (IS_GEN8(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv)) {
			ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges);
			ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
			ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen8);
			ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
		}
	} else {
		ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
		ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
	}

	iosf_mbi_register_pmic_bus_access_notifier(
		&dev_priv->uncore.pmic_bus_access_nb);
}
void intel_uncore_fini(struct drm_i915_private *dev_priv)
{
	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev_priv);

	iosf_mbi_punit_acquire();
	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
		&dev_priv->uncore.pmic_bus_access_nb);
	intel_uncore_forcewake_reset(dev_priv, false);
	iosf_mbi_punit_release();
}
static const struct reg_whitelist {
	i915_reg_t offset_ldw;
	i915_reg_t offset_udw;
	u16 gen_mask;
	u8 size;
} reg_read_whitelist[] = { {
	.offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
	.offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
	.gen_mask = INTEL_GEN_MASK(4, 10),
	.size = 8
} };
int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_reg_read *reg = data;
	struct reg_whitelist const *entry;
	unsigned int flags;
	int remain;
	int ret = 0;

	entry = reg_read_whitelist;
	remain = ARRAY_SIZE(reg_read_whitelist);
	while (remain) {
		u32 entry_offset = i915_mmio_reg_offset(entry->offset_ldw);

		GEM_BUG_ON(!is_power_of_2(entry->size));
		GEM_BUG_ON(entry->size > 8);
		GEM_BUG_ON(entry_offset & (entry->size - 1));

		if (INTEL_INFO(dev_priv)->gen_mask & entry->gen_mask &&
		    entry_offset == (reg->offset & -entry->size))
			break;
		entry++;
		remain--;
	}

	if (!remain)
		return -EINVAL;

	flags = reg->offset & (entry->size - 1);

	intel_runtime_pm_get(dev_priv);
	if (entry->size == 8 && flags == I915_REG_READ_8B_WA)
		reg->val = I915_READ64_2x32(entry->offset_ldw,
					    entry->offset_udw);
	else if (entry->size == 8 && flags == 0)
		reg->val = I915_READ64(entry->offset_ldw);
	else if (entry->size == 4 && flags == 0)
		reg->val = I915_READ(entry->offset_ldw);
	else if (entry->size == 2 && flags == 0)
		reg->val = I915_READ16(entry->offset_ldw);
	else if (entry->size == 1 && flags == 0)
		reg->val = I915_READ8(entry->offset_ldw);
	else
		ret = -EINVAL;
	intel_runtime_pm_put(dev_priv);

	return ret;
}
static void gen3_stop_engine(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	const u32 base = engine->mmio_base;
	const i915_reg_t mode = RING_MI_MODE(base);

	I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING));
	if (intel_wait_for_register_fw(dev_priv,
				       mode, MODE_IDLE, MODE_IDLE,
				       500))
		DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n",
				 engine->name);

	I915_WRITE_FW(RING_HEAD(base), I915_READ_FW(RING_TAIL(base)));

	I915_WRITE_FW(RING_HEAD(base), 0);
	I915_WRITE_FW(RING_TAIL(base), 0);

	/* The ring must be empty before it is disabled */
	I915_WRITE_FW(RING_CTL(base), 0);

	/* Check acts as a post */
	if (I915_READ_FW(RING_HEAD(base)) != 0)
		DRM_DEBUG_DRIVER("%s: ring head not parked\n",
				 engine->name);
}
static void i915_stop_engines(struct drm_i915_private *dev_priv,
			      unsigned engine_mask)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if (INTEL_GEN(dev_priv) < 3)
		return;

	for_each_engine_masked(engine, dev_priv, engine_mask, id)
		gen3_stop_engine(engine);
}
static bool i915_reset_complete(struct pci_dev *pdev)
{
	u8 gdrst;

	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_STATUS) == 0;
}

static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/* assert reset for at least 20 usec */
	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	usleep_range(50, 200);
	pci_write_config_byte(pdev, I915_GDRST, 0);

	return wait_for(i915_reset_complete(pdev), 500);
}
static bool g4x_reset_complete(struct pci_dev *pdev)
{
	u8 gdrst;

	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for(g4x_reset_complete(pdev), 500);
}
static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D,
		   I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(pdev), 500);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for media reset failed\n");
		goto out;
	}

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(pdev), 500);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for render reset failed\n");
		goto out;
	}

out:
	pci_write_config_byte(pdev, I915_GDRST, 0);

	I915_WRITE(VDECCLK_GATE_D,
		   I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	return ret;
}
static int ironlake_do_reset(struct drm_i915_private *dev_priv,
			     unsigned engine_mask)
{
	int ret;

	I915_WRITE(ILK_GDSR, ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = intel_wait_for_register(dev_priv,
				      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
				      500);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for render reset failed\n");
		goto out;
	}

	I915_WRITE(ILK_GDSR, ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = intel_wait_for_register(dev_priv,
				      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
				      500);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for media reset failed\n");
		goto out;
	}

out:
	I915_WRITE(ILK_GDSR, 0);
	POSTING_READ(ILK_GDSR);
	return ret;
}
/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
				u32 hw_domain_mask)
{
	int err;

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);

	/* Wait for the device to ack the reset requests */
	err = intel_wait_for_register_fw(dev_priv,
					 GEN6_GDRST, hw_domain_mask, 0,
					 500);
	if (err)
		DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n",
				 hw_domain_mask);

	return err;
}
/**
 * gen6_reset_engines - reset individual engines
 * @dev_priv: i915 device
 * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
 *
 * This function will reset the individual engines that are set in engine_mask.
 * If you provide ALL_ENGINES as mask, a full global domain reset will be issued.
 *
 * Note: It is the responsibility of the caller to handle the difference between
 * asking full domain reset versus reset for all available individual engines.
 *
 * Returns 0 on success, nonzero on error.
 */
static int gen6_reset_engines(struct drm_i915_private *dev_priv,
			      unsigned engine_mask)
{
	struct intel_engine_cs *engine;
	const u32 hw_engine_mask[I915_NUM_ENGINES] = {
		[RCS] = GEN6_GRDOM_RENDER,
		[BCS] = GEN6_GRDOM_BLT,
		[VCS] = GEN6_GRDOM_MEDIA,
		[VCS2] = GEN8_GRDOM_MEDIA2,
		[VECS] = GEN6_GRDOM_VECS,
	};
	u32 hw_mask;

	if (engine_mask == ALL_ENGINES) {
		hw_mask = GEN6_GRDOM_FULL;
	} else {
		unsigned int tmp;

		hw_mask = 0;
		for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
			hw_mask |= hw_engine_mask[engine->id];
	}

	return gen6_hw_domain_reset(dev_priv, hw_mask);
}
/**
 * __intel_wait_for_register_fw - wait until register matches expected state
 * @dev_priv: the i915 device
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
 * @slow_timeout_ms: slow timeout in millisecond
 * @out_value: optional placeholder to hold registry value
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ_FW(reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
 * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
 * must be not larger than 20,000 microseconds.
 *
 * Note that this routine assumes the caller holds forcewake asserted, it is
 * not suitable for very long waits. See intel_wait_for_register() if you
 * wish to wait without holding forcewake for the duration (i.e. you expect
 * the wait to be slow).
 *
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
				 i915_reg_t reg,
				 u32 mask,
				 u32 value,
				 unsigned int fast_timeout_us,
				 unsigned int slow_timeout_ms,
				 u32 *out_value)
{
	u32 uninitialized_var(reg_value);
#define done (((reg_value = I915_READ_FW(reg)) & mask) == value)
	int ret;

	/* Catch any overuse of this function */
	might_sleep_if(slow_timeout_ms);
	GEM_BUG_ON(fast_timeout_us > 20000);

	ret = -ETIMEDOUT;
	if (fast_timeout_us && fast_timeout_us <= 20000)
		ret = _wait_for_atomic(done, fast_timeout_us, 0);
	if (ret && slow_timeout_ms)
		ret = wait_for(done, slow_timeout_ms);

	if (out_value)
		*out_value = reg_value;

	return ret;
#undef done
}
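
/*
 * Illustrative call (a sketch only; the register and bit are placeholders):
 * poll for an ack bit while already holding forcewake, with only the fast
 * atomic-safe timeout in use.
 *
 *	err = __intel_wait_for_register_fw(dev_priv, reg,
 *					   BIT(0), BIT(0),
 *					   500, 0, NULL);
 */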
/**
 * intel_wait_for_register - wait until register matches expected state
 * @dev_priv: the i915 device
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @timeout_ms: timeout in millisecond
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ(reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @timeout_ms milliseconds.
 *
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int intel_wait_for_register(struct drm_i915_private *dev_priv,
			    i915_reg_t reg,
			    u32 mask,
			    u32 value,
			    unsigned int timeout_ms)
{
	unsigned fw =
		intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
	int ret;

	might_sleep();

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(dev_priv, fw);

	ret = __intel_wait_for_register_fw(dev_priv,
					   reg, mask, value,
					   2, 0, NULL);

	intel_uncore_forcewake_put__locked(dev_priv, fw);
	spin_unlock_irq(&dev_priv->uncore.lock);

	if (ret)
		ret = wait_for((I915_READ_NOTRACE(reg) & mask) == value,
			       timeout_ms);

	return ret;
}
static int gen8_reset_engine_start(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
		      _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));

	ret = intel_wait_for_register_fw(dev_priv,
					 RING_RESET_CTL(engine->mmio_base),
					 RESET_CTL_READY_TO_RESET,
					 RESET_CTL_READY_TO_RESET,
					 700);
	if (ret)
		DRM_ERROR("%s: reset request timeout\n", engine->name);

	return ret;
}

static void gen8_reset_engine_cancel(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
		      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
}
static int gen8_reset_engines(struct drm_i915_private *dev_priv,
			      unsigned engine_mask)
{
	struct intel_engine_cs *engine;
	unsigned int tmp;

	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
		if (gen8_reset_engine_start(engine))
			goto not_ready;

	return gen6_reset_engines(dev_priv, engine_mask);

not_ready:
	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
		gen8_reset_engine_cancel(engine);

	return -EIO;
}
typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask);

static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
{
	if (!i915_modparams.reset)
		return NULL;

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return gen8_reset_engines;
	else if (INTEL_INFO(dev_priv)->gen >= 6)
		return gen6_reset_engines;
	else if (IS_GEN5(dev_priv))
		return ironlake_do_reset;
	else if (IS_G4X(dev_priv))
		return g4x_do_reset;
	else if (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
		return g33_do_reset;
	else if (INTEL_INFO(dev_priv)->gen >= 3)
		return i915_do_reset;
	else
		return NULL;
}
int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	reset_func reset = intel_get_gpu_reset(dev_priv);
	int retry;
	int ret;

	might_sleep();

	/* If the power well sleeps during the reset, the reset
	 * request may be dropped and never completes (causing -EIO).
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	for (retry = 0; retry < 3; retry++) {

		/* We stop engines, otherwise we might get failed reset and a
		 * dead gpu (on elk). Also as modern gpu as kbl can suffer
		 * from system hang if batchbuffer is progressing when
		 * the reset is issued, regardless of READY_TO_RESET ack.
		 * Thus assume it is best to stop engines on all gens
		 * where we have a gpu reset.
		 *
		 * WaMediaResetMainRingCleanup:ctg,elk (presumably)
		 *
		 * FIXME: Wa for more modern gens needs to be validated
		 */
		i915_stop_engines(dev_priv, engine_mask);

		ret = -ENODEV;
		if (reset)
			ret = reset(dev_priv, engine_mask);
		if (ret != -ETIMEDOUT)
			break;

		cond_resched();
	}
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}
bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)
{
	return intel_get_gpu_reset(dev_priv) != NULL;
}

bool intel_has_reset_engine(struct drm_i915_private *dev_priv)
{
	return (dev_priv->info.has_reset_engine &&
		i915_modparams.reset >= 2);
}
int intel_reset_guc(struct drm_i915_private *dev_priv)
{
	int ret;

	if (!HAS_GUC(dev_priv))
		return -EINVAL;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	ret = gen6_hw_domain_reset(dev_priv, GEN9_GRDOM_GUC);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}
bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	return check_for_unclaimed_mmio(dev_priv);
}

bool
intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
{
	if (unlikely(i915_modparams.mmio_debug ||
		     dev_priv->uncore.unclaimed_mmio_check <= 0))
		return false;

	if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
		DRM_DEBUG("Unclaimed register detected, "
			  "enabling oneshot unclaimed register reporting. "
			  "Please use i915.mmio_debug=N for more information.\n");
		i915_modparams.mmio_debug++;
		dev_priv->uncore.unclaimed_mmio_check--;
		return true;
	}

	return false;
}
static enum forcewake_domains
intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
				i915_reg_t reg)
{
	u32 offset = i915_mmio_reg_offset(reg);
	enum forcewake_domains fw_domains;

	if (HAS_FWTABLE(dev_priv)) {
		fw_domains = __fwtable_reg_read_fw_domains(offset);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		fw_domains = __gen6_reg_read_fw_domains(offset);
	} else {
		WARN_ON(!IS_GEN(dev_priv, 2, 5));
		fw_domains = 0;
	}

	WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);

	return fw_domains;
}

static enum forcewake_domains
intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
				 i915_reg_t reg)
{
	u32 offset = i915_mmio_reg_offset(reg);
	enum forcewake_domains fw_domains;

	if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) {
		fw_domains = __fwtable_reg_write_fw_domains(offset);
	} else if (IS_GEN8(dev_priv)) {
		fw_domains = __gen8_reg_write_fw_domains(offset);
	} else if (IS_GEN(dev_priv, 6, 7)) {
		fw_domains = FORCEWAKE_RENDER;
	} else {
		WARN_ON(!IS_GEN(dev_priv, 2, 5));
		fw_domains = 0;
	}

	WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);

	return fw_domains;
}
/**
 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
 *				    a register
 * @dev_priv: pointer to struct drm_i915_private
 * @reg: register in question
 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
 *
 * Returns a set of forcewake domains required to be taken with for example
 * intel_uncore_forcewake_get for the specified register to be accessible in the
 * specified mode (read, write or read/write) with raw mmio accessors.
 *
 * NOTE: On Gen6 and Gen7 write forcewake domain (FORCEWAKE_RENDER) requires the
 * callers to do FIFO management on their own or risk losing writes.
 */
enum forcewake_domains
intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
			       i915_reg_t reg, unsigned int op)
{
	enum forcewake_domains fw_domains = 0;

	WARN_ON(!op);

	if (intel_vgpu_active(dev_priv))
		return 0;

	if (op & FW_REG_READ)
		fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg);

	if (op & FW_REG_WRITE)
		fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg);

	return fw_domains;
}
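
/*
 * Illustrative pairing of the helpers above (a sketch; @reg is a placeholder):
 * a caller needing raw access can look up the required domains once and take
 * them under the uncore lock, as intel_wait_for_register() does:
 *
 *	fw = intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
 *	spin_lock_irq(&dev_priv->uncore.lock);
 *	intel_uncore_forcewake_get__locked(dev_priv, fw);
 *	val = I915_READ_FW(reg);
 *	intel_uncore_forcewake_put__locked(dev_priv, fw);
 *	spin_unlock_irq(&dev_priv->uncore.lock);
 */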
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_uncore.c"
#include "selftests/intel_uncore.c"
#endif