/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "intel_breadcrumbs.h"
#include "intel_gt.h"
#include "intel_gt_clock_utils.h"
#include "intel_gt_irq.h"
#include "intel_gt_pm_irq.h"
#include "intel_rps.h"
#include "intel_sideband.h"
#include "../../../platform/x86/intel_ips.h"
#define BUSY_MAX_EI	20u /* ms */

/*
 * Lock protecting IPS related data structures
 */
static DEFINE_SPINLOCK(mchdev_lock);
static struct intel_gt *rps_to_gt(struct intel_rps *rps)
{
	return container_of(rps, struct intel_gt, rps);
}

static struct drm_i915_private *rps_to_i915(struct intel_rps *rps)
{
	return rps_to_gt(rps)->i915;
}

static struct intel_uncore *rps_to_uncore(struct intel_rps *rps)
{
	return rps_to_gt(rps)->uncore;
}

static u32 rps_pm_sanitize_mask(struct intel_rps *rps, u32 mask)
{
	return mask & ~rps->pm_intrmsk_mbz;
}

static inline void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
{
	intel_uncore_write_fw(uncore, reg, val);
}
static void rps_timer(struct timer_list *t)
{
	struct intel_rps *rps = from_timer(rps, t, timer);
	struct intel_engine_cs *engine;
	ktime_t dt, last, timestamp;
	enum intel_engine_id id;
	s64 max_busy[3] = {};

	timestamp = 0;
	for_each_engine(engine, rps_to_gt(rps), id) {
		s64 busy;
		int i;

		dt = intel_engine_get_busy_time(engine, &timestamp);
		last = engine->stats.rps;
		engine->stats.rps = dt;

		busy = ktime_to_ns(ktime_sub(dt, last));
		for (i = 0; i < ARRAY_SIZE(max_busy); i++) {
			if (busy > max_busy[i])
				swap(busy, max_busy[i]);
		}
	}

	last = rps->pm_timestamp;
	rps->pm_timestamp = timestamp;

	if (intel_rps_is_active(rps)) {
		s64 busy;
		int i;

		dt = ktime_sub(timestamp, last);

		/*
		 * Our goal is to evaluate each engine independently, so we run
		 * at the lowest clocks required to sustain the heaviest
		 * workload. However, a task may be split into sequential
		 * dependent operations across a set of engines, such that
		 * the independent contributions do not account for high load,
		 * but overall the task is GPU bound. For example, consider
		 * video decode on vcs followed by colour post-processing
		 * on vecs, followed by general post-processing on rcs.
		 * Since multi-engines being active does imply a single
		 * continuous workload across all engines, we hedge our
		 * bets by only contributing a factor of the distributed
		 * load into our busyness calculation.
		 */
		busy = max_busy[0];
		for (i = 1; i < ARRAY_SIZE(max_busy); i++) {
			busy += div_u64(max_busy[i], 1 << i);
		}
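
		/*
		 * Worked example (added for clarity, not from the original
		 * source): with per-engine busyness of {8, 6, 4} ms over a
		 * 10 ms interval, busy = 8 + 6/2 + 4/4 = 12 ms, i.e. ~120%
		 * effective load to compare against the thresholds below.
		 */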
		GT_TRACE(rps_to_gt(rps),
			 "busy:%lld [%d%%], max:[%lld, %lld, %lld], interval:%d\n",
			 busy, (int)div64_u64(100 * busy, dt),
			 max_busy[0], max_busy[1], max_busy[2],
			 rps->pm_interval);

		if (100 * busy > rps->power.up_threshold * dt &&
		    rps->cur_freq < rps->max_freq_softlimit) {
			rps->pm_iir |= GEN6_PM_RP_UP_THRESHOLD;
			rps->pm_interval = 1;
			schedule_work(&rps->work);
		} else if (100 * busy < rps->power.down_threshold * dt &&
			   rps->cur_freq > rps->min_freq_softlimit) {
			rps->pm_iir |= GEN6_PM_RP_DOWN_THRESHOLD;
			rps->pm_interval = 1;
			schedule_work(&rps->work);
		}

		mod_timer(&rps->timer,
			  jiffies + msecs_to_jiffies(rps->pm_interval));
		rps->pm_interval = min(rps->pm_interval * 2, BUSY_MAX_EI);
	}
}
static void rps_start_timer(struct intel_rps *rps)
{
	rps->pm_timestamp = ktime_sub(ktime_get(), rps->pm_timestamp);
	rps->pm_interval = 1;
	mod_timer(&rps->timer, jiffies + 1);
}

static void rps_stop_timer(struct intel_rps *rps)
{
	del_timer_sync(&rps->timer);
	rps->pm_timestamp = ktime_sub(ktime_get(), rps->pm_timestamp);
	cancel_work_sync(&rps->work);
}
static u32 rps_pm_mask(struct intel_rps *rps, u8 val)
{
	u32 mask = 0;

	/* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */
	if (val > rps->min_freq_softlimit)
		mask |= (GEN6_PM_RP_UP_EI_EXPIRED |
			 GEN6_PM_RP_DOWN_THRESHOLD |
			 GEN6_PM_RP_DOWN_TIMEOUT);

	if (val < rps->max_freq_softlimit)
		mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;

	mask &= rps->pm_events;
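
	/*
	 * Added note: GEN6_PMINTRMSK holds the set of *masked* interrupts,
	 * hence the inversion of the enabled events before it is written.
	 */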

	return rps_pm_sanitize_mask(rps, ~mask);
}

static void rps_reset_ei(struct intel_rps *rps)
{
	memset(&rps->ei, 0, sizeof(rps->ei));
}
static void rps_enable_interrupts(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	GT_TRACE(gt, "interrupts:on rps->pm_events: %x, rps_pm_mask:%x\n",
		 rps->pm_events, rps_pm_mask(rps, rps->last_freq));

	rps_reset_ei(rps);

	spin_lock_irq(&gt->irq_lock);
	gen6_gt_pm_enable_irq(gt, rps->pm_events);
	spin_unlock_irq(&gt->irq_lock);

	intel_uncore_write(gt->uncore,
			   GEN6_PMINTRMSK, rps_pm_mask(rps, rps->last_freq));
}

static void gen6_rps_reset_interrupts(struct intel_rps *rps)
{
	gen6_gt_pm_reset_iir(rps_to_gt(rps), GEN6_PM_RPS_EVENTS);
}

static void gen11_rps_reset_interrupts(struct intel_rps *rps)
{
	while (gen11_gt_reset_one_iir(rps_to_gt(rps), 0, GEN11_GTPM))
		;
}

static void rps_reset_interrupts(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	spin_lock_irq(&gt->irq_lock);
	if (INTEL_GEN(gt->i915) >= 11)
		gen11_rps_reset_interrupts(rps);
	else
		gen6_rps_reset_interrupts(rps);

	rps->pm_iir = 0;
	spin_unlock_irq(&gt->irq_lock);
}
static void rps_disable_interrupts(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	intel_uncore_write(gt->uncore,
			   GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u));

	spin_lock_irq(&gt->irq_lock);
	gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS);
	spin_unlock_irq(&gt->irq_lock);

	intel_synchronize_irq(gt->i915);

	/*
	 * Now that we will not be generating any more work, flush any
	 * outstanding tasks. As we are called on the RPS idle path,
	 * we will reset the GPU to minimum frequencies, so the current
	 * state of the worker can be discarded.
	 */
	cancel_work_sync(&rps->work);

	rps_reset_interrupts(rps);
	GT_TRACE(gt, "interrupts:off\n");
}
static const struct cparams {
	u16 i;
	u16 t;
	u16 m;
	u16 c;
} cparams[] = {
	{ 1, 1333, 301, 28664 },
	{ 1, 1066, 294, 24460 },
	{ 1, 800, 294, 25192 },
	{ 0, 1333, 276, 27605 },
	{ 0, 1066, 276, 27605 },
	{ 0, 800, 231, 23784 },
};
static void gen5_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u8 fmax, fmin, fstart;
	u32 rgvmodectl;
	int c_m, i;

	if (i915->fsb_freq <= 3200)
		c_m = 0;
	else if (i915->fsb_freq <= 4800)
		c_m = 1;
	else
		c_m = 2;

	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
		if (cparams[i].i == c_m && cparams[i].t == i915->mem_freq) {
			rps->ips.m = cparams[i].m;
			rps->ips.c = cparams[i].c;
			break;
		}
	}

	rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);

	/* Set up min, max, and cur for interrupt handling */
	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		 MEMMODE_FSTART_SHIFT;
	drm_dbg(&i915->drm, "fmax: %d, fmin: %d, fstart: %d\n",
		fmax, fmin, fstart);

	rps->min_freq = fmax;
	rps->efficient_freq = fstart;
	rps->max_freq = fmin;
}
static unsigned long __ips_chipset_val(struct intel_ips *ips)
{
	struct intel_uncore *uncore =
		rps_to_uncore(container_of(ips, struct intel_rps, ips));
	unsigned long now = jiffies_to_msecs(jiffies), dt;
	unsigned long result;
	u64 total, delta;

	lockdep_assert_held(&mchdev_lock);

	/*
	 * Prevent division-by-zero if we are asking too fast.
	 * Also, we don't get interesting results if we are polling
	 * faster than once in 10ms, so just return the saved value
	 * in such cases.
	 */
	dt = now - ips->last_time1;
	if (dt <= 10)
		return ips->chipset_power;

	/* FIXME: handle per-counter overflow */
	total = intel_uncore_read(uncore, DMIEC);
	total += intel_uncore_read(uncore, DDREC);
	total += intel_uncore_read(uncore, CSIEC);

	delta = total - ips->last_count1;

	result = div_u64(div_u64(ips->m * delta, dt) + ips->c, 10);

	ips->last_count1 = total;
	ips->last_time1 = now;

	ips->chipset_power = result;

	return result;
}
static unsigned long ips_mch_val(struct intel_uncore *uncore)
{
	unsigned int m, x, b;
	u32 tsfs;

	tsfs = intel_uncore_read(uncore, TSFS);
	x = intel_uncore_read8(uncore, TR1);

	b = tsfs & TSFS_INTR_MASK;
	m = (tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT;

	return m * x / 127 - b;
}

static int _pxvid_to_vd(u8 pxvid)
{
	if (pxvid == 0)
		return 0;

	if (pxvid >= 8 && pxvid < 31)
		pxvid = 31;

	return (pxvid + 2) * 125;
}

static u32 pvid_to_extvid(struct drm_i915_private *i915, u8 pxvid)
{
	const int vd = _pxvid_to_vd(pxvid);

	if (INTEL_INFO(i915)->is_mobile)
		return max(vd - 1125, 0);

	return vd;
}
static void __gen5_ips_update(struct intel_ips *ips)
{
	struct intel_uncore *uncore =
		rps_to_uncore(container_of(ips, struct intel_rps, ips));
	u64 now, delta, dt;
	u32 count;

	lockdep_assert_held(&mchdev_lock);

	now = ktime_get_raw_ns();
	dt = now - ips->last_time2;
	do_div(dt, NSEC_PER_MSEC);

	/* Don't divide by 0 */
	if (dt <= 10)
		return;

	count = intel_uncore_read(uncore, GFXEC);
	delta = count - ips->last_count2;

	ips->last_count2 = count;
	ips->last_time2 = now;

	/* More magic constants... */
	ips->gfx_power = div_u64(delta * 1181, dt * 10);
}

static void gen5_rps_update(struct intel_rps *rps)
{
	spin_lock_irq(&mchdev_lock);
	__gen5_ips_update(&rps->ips);
	spin_unlock_irq(&mchdev_lock);
}
static unsigned int gen5_invert_freq(struct intel_rps *rps,
				     unsigned int val)
{
	/* Invert the frequency bin into an ips delay */
	val = rps->max_freq - val;
	val = rps->min_freq + val;

	return val;
}

static bool gen5_rps_set(struct intel_rps *rps, u8 val)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u16 rgvswctl;

	lockdep_assert_held(&mchdev_lock);

	rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		DRM_DEBUG("gpu busy, RCS change rejected\n");
		return false; /* still busy with another command */
	}

	/* Invert the frequency bin into an ips delay */
	val = gen5_invert_freq(rps, val);

	rgvswctl =
		(MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(val << MEMCTL_FREQ_SHIFT) |
		MEMCTL_SFCAVM;
	intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
	intel_uncore_posting_read16(uncore, MEMSWCTL);

	rgvswctl |= MEMCTL_CMD_STS;
	intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);

	return true;
}
static unsigned long intel_pxfreq(u32 vidfreq)
{
	int div = (vidfreq & 0x3f0000) >> 16;
	int post = (vidfreq & 0x3000) >> 12;
	int pre = (vidfreq & 0x7);

	if (!pre)
		return 0;

	return div * 133333 / (pre << post);
}
static unsigned int init_emon(struct intel_uncore *uncore)
{
	u8 pxw[16];
	int i;

	/* Disable to program */
	intel_uncore_write(uncore, ECR, 0);
	intel_uncore_posting_read(uncore, ECR);

	/* Program energy weights for various events */
	intel_uncore_write(uncore, SDEW, 0x15040d00);
	intel_uncore_write(uncore, CSIEW0, 0x007f0000);
	intel_uncore_write(uncore, CSIEW1, 0x1e220004);
	intel_uncore_write(uncore, CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		intel_uncore_write(uncore, PEW(i), 0);
	for (i = 0; i < 3; i++)
		intel_uncore_write(uncore, DEW(i), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = intel_uncore_read(uncore, PXVFREQ(i));
		unsigned int freq = intel_pxfreq(pxvidfreq);
		unsigned int vid =
			(pxvidfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;
		unsigned int val;

		val = vid * vid * freq / 1000 * 255;
		val /= 127 * 127 * 900;

		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	for (i = 0; i < 4; i++) {
		intel_uncore_write(uncore, PXW(i),
				   pxw[i * 4 + 0] << 24 |
				   pxw[i * 4 + 1] << 16 |
				   pxw[i * 4 + 2] << 8 |
				   pxw[i * 4 + 3] << 0);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	intel_uncore_write(uncore, OGW0, 0);
	intel_uncore_write(uncore, OGW1, 0);
	intel_uncore_write(uncore, EG0, 0x00007f00);
	intel_uncore_write(uncore, EG1, 0x0000000e);
	intel_uncore_write(uncore, EG2, 0x000e0000);
	intel_uncore_write(uncore, EG3, 0x68000300);
	intel_uncore_write(uncore, EG4, 0x42000000);
	intel_uncore_write(uncore, EG5, 0x00140031);
	intel_uncore_write(uncore, EG6, 0);
	intel_uncore_write(uncore, EG7, 0);

	for (i = 0; i < 8; i++)
		intel_uncore_write(uncore, PXWL(i), 0);

	/* Enable PMON + select events */
	intel_uncore_write(uncore, ECR, 0x80000019);

	return intel_uncore_read(uncore, LCFUSE02) & LCFUSE_HIV_MASK;
}
static bool gen5_rps_enable(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u8 fstart, vstart;
	u32 rgvmodectl;

	spin_lock_irq(&mchdev_lock);

	rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);

	/* Enable temp reporting */
	intel_uncore_write16(uncore, PMMISC,
			     intel_uncore_read16(uncore, PMMISC) | MCPPCE_EN);
	intel_uncore_write16(uncore, TSC1,
			     intel_uncore_read16(uncore, TSC1) | TSE);

	/* 100ms RC evaluation intervals */
	intel_uncore_write(uncore, RCUPEI, 100000);
	intel_uncore_write(uncore, RCDNEI, 100000);

	/* Set max/min thresholds to 90ms and 80ms respectively */
	intel_uncore_write(uncore, RCBMAXAVG, 90000);
	intel_uncore_write(uncore, RCBMINAVG, 80000);

	intel_uncore_write(uncore, MEMIHYST, 1);

	/* Set up min, max, and cur for interrupt handling */
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		 MEMMODE_FSTART_SHIFT;

	vstart = (intel_uncore_read(uncore, PXVFREQ(fstart)) &
		  PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;

	intel_uncore_write(uncore,
			   MEMINTREN,
			   MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

	intel_uncore_write(uncore, VIDSTART, vstart);
	intel_uncore_posting_read(uncore, VIDSTART);

	rgvmodectl |= MEMMODE_SWMODE_EN;
	intel_uncore_write(uncore, MEMMODECTL, rgvmodectl);

	if (wait_for_atomic((intel_uncore_read(uncore, MEMSWCTL) &
			     MEMCTL_CMD_STS) == 0, 10))
		drm_err(&uncore->i915->drm,
			"stuck trying to change perf mode\n");
	mdelay(1);

	gen5_rps_set(rps, rps->cur_freq);

	rps->ips.last_count1 = intel_uncore_read(uncore, DMIEC);
	rps->ips.last_count1 += intel_uncore_read(uncore, DDREC);
	rps->ips.last_count1 += intel_uncore_read(uncore, CSIEC);
	rps->ips.last_time1 = jiffies_to_msecs(jiffies);

	rps->ips.last_count2 = intel_uncore_read(uncore, GFXEC);
	rps->ips.last_time2 = ktime_get_raw_ns();

	spin_lock(&i915->irq_lock);
	ilk_enable_display_irq(i915, DE_PCU_EVENT);
	spin_unlock(&i915->irq_lock);

	spin_unlock_irq(&mchdev_lock);

	rps->ips.corr = init_emon(uncore);

	return true;
}
static void gen5_rps_disable(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u16 rgvswctl;

	spin_lock_irq(&mchdev_lock);

	spin_lock(&i915->irq_lock);
	ilk_disable_display_irq(i915, DE_PCU_EVENT);
	spin_unlock(&i915->irq_lock);

	rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);

	/* Ack interrupts, disable EFC interrupt */
	intel_uncore_write(uncore, MEMINTREN,
			   intel_uncore_read(uncore, MEMINTREN) &
			   ~MEMINT_EVAL_CHG_EN);
	intel_uncore_write(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);

	/* Go back to the starting frequency */
	gen5_rps_set(rps, rps->idle_freq);
	mdelay(1);
	rgvswctl |= MEMCTL_CMD_STS;
	intel_uncore_write(uncore, MEMSWCTL, rgvswctl);
	mdelay(1);

	spin_unlock_irq(&mchdev_lock);
}
static u32 rps_limits(struct intel_rps *rps, u8 val)
{
	u32 limits;

	/*
	 * Only set the down limit when we've reached the lowest level to avoid
	 * getting more interrupts, otherwise leave this clear. This prevents a
	 * race in the hw when coming out of rc6: There's a tiny window where
	 * the hw runs at the minimal clock before selecting the desired
	 * frequency, if the down threshold expires in that window we will not
	 * receive a down interrupt.
	 */
	if (INTEL_GEN(rps_to_i915(rps)) >= 9) {
		limits = rps->max_freq_softlimit << 23;
		if (val <= rps->min_freq_softlimit)
			limits |= rps->min_freq_softlimit << 14;
	} else {
		limits = rps->max_freq_softlimit << 24;
		if (val <= rps->min_freq_softlimit)
			limits |= rps->min_freq_softlimit << 16;
	}

	return limits;
}
static void rps_set_power(struct intel_rps *rps, int new_power)
{
	struct intel_gt *gt = rps_to_gt(rps);
	struct intel_uncore *uncore = gt->uncore;
	u32 threshold_up = 0, threshold_down = 0; /* in % */
	u32 ei_up = 0, ei_down = 0;

	lockdep_assert_held(&rps->power.mutex);

	if (new_power == rps->power.mode)
		return;

	/* Note the units here are not exactly 1us, but 1280ns. */
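
	/*
	 * Added note: threshold% of an evaluation interval expressed in ns is
	 * ei_us * 1000 * threshold / 100, i.e. the "* 10" factor used below.
	 */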

	/*
	 * When byt can survive without system hang with dynamic
	 * sw freq adjustments, this restriction can be lifted.
	 */
	if (IS_VALLEYVIEW(gt->i915))
		goto skip_hw_write;

	GT_TRACE(gt,
		 "changing power mode [%d], up %d%% @ %dus, down %d%% @ %dus\n",
		 new_power, threshold_up, ei_up, threshold_down, ei_down);

	set(uncore, GEN6_RP_UP_EI,
	    intel_gt_ns_to_pm_interval(gt, ei_up * 1000));
	set(uncore, GEN6_RP_UP_THRESHOLD,
	    intel_gt_ns_to_pm_interval(gt, ei_up * threshold_up * 10));

	set(uncore, GEN6_RP_DOWN_EI,
	    intel_gt_ns_to_pm_interval(gt, ei_down * 1000));
	set(uncore, GEN6_RP_DOWN_THRESHOLD,
	    intel_gt_ns_to_pm_interval(gt, ei_down * threshold_down * 10));

	set(uncore, GEN6_RP_CONTROL,
	    (INTEL_GEN(gt->i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
	    GEN6_RP_MEDIA_HW_NORMAL_MODE |
	    GEN6_RP_MEDIA_IS_GFX |
	    GEN6_RP_ENABLE |
	    GEN6_RP_UP_BUSY_AVG |
	    GEN6_RP_DOWN_IDLE_AVG);

skip_hw_write:
	rps->power.mode = new_power;
	rps->power.up_threshold = threshold_up;
	rps->power.down_threshold = threshold_down;
}
static void gen6_rps_set_thresholds(struct intel_rps *rps, u8 val)
{
	int new_power;

	new_power = rps->power.mode;
	switch (rps->power.mode) {
	case LOW_POWER:
		if (val > rps->efficient_freq + 1 &&
		    val > rps->cur_freq)
			new_power = BETWEEN;
		break;

	case BETWEEN:
		if (val <= rps->efficient_freq &&
		    val < rps->cur_freq)
			new_power = LOW_POWER;
		else if (val >= rps->rp0_freq &&
			 val > rps->cur_freq)
			new_power = HIGH_POWER;
		break;

	case HIGH_POWER:
		if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 &&
		    val < rps->cur_freq)
			new_power = BETWEEN;
		break;
	}
	/* Max/min bins are special */
	if (val <= rps->min_freq_softlimit)
		new_power = LOW_POWER;
	if (val >= rps->max_freq_softlimit)
		new_power = HIGH_POWER;

	mutex_lock(&rps->power.mutex);
	if (rps->power.interactive)
		new_power = HIGH_POWER;
	rps_set_power(rps, new_power);
	mutex_unlock(&rps->power.mutex);
}
void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive)
{
	GT_TRACE(rps_to_gt(rps), "mark interactive: %s\n", yesno(interactive));

	mutex_lock(&rps->power.mutex);
	if (interactive) {
		if (!rps->power.interactive++ && intel_rps_is_active(rps))
			rps_set_power(rps, HIGH_POWER);
	} else {
		GEM_BUG_ON(!rps->power.interactive);
		rps->power.interactive--;
	}
	mutex_unlock(&rps->power.mutex);
}
static int gen6_rps_set(struct intel_rps *rps, u8 val)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 swreq;

	if (INTEL_GEN(i915) >= 9)
		swreq = GEN9_FREQUENCY(val);
	else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
		swreq = HSW_FREQUENCY(val);
	else
		swreq = (GEN6_FREQUENCY(val) |
			 GEN6_OFFSET(0) |
			 GEN6_AGGRESSIVE_TURBO);
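
	/*
	 * Added note: the same RPNSWREQ register is written in every case;
	 * only the bit encoding of the requested ratio differs between
	 * gen6/7, HSW/BDW and gen9+.
	 */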
	set(uncore, GEN6_RPNSWREQ, swreq);

	GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d, swreq:%x\n",
		 val, intel_gpu_freq(rps, val), swreq);

	return 0;
}
static int vlv_rps_set(struct intel_rps *rps, u8 val)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	int err;

	vlv_punit_get(i915);
	err = vlv_punit_write(i915, PUNIT_REG_GPU_FREQ_REQ, val);
	vlv_punit_put(i915);

	GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d\n",
		 val, intel_gpu_freq(rps, val));

	return err;
}
static int rps_set(struct intel_rps *rps, u8 val, bool update)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	int err;

	if (INTEL_GEN(i915) < 6)
		return 0;

	if (val == rps->last_freq)
		return 0;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		err = vlv_rps_set(rps, val);
	else
		err = gen6_rps_set(rps, val);
	if (err)
		return err;

	if (update)
		gen6_rps_set_thresholds(rps, val);
	rps->last_freq = val;

	return 0;
}
void intel_rps_unpark(struct intel_rps *rps)
{
	if (!intel_rps_is_enabled(rps))
		return;

	GT_TRACE(rps_to_gt(rps), "unpark:%x\n", rps->cur_freq);

	/*
	 * Use the user's desired frequency as a guide, but for better
	 * performance, jump directly to RPe as our starting frequency.
	 */
	mutex_lock(&rps->lock);

	intel_rps_set_active(rps);
	intel_rps_set(rps,
		      clamp(rps->cur_freq,
			    rps->min_freq_softlimit,
			    rps->max_freq_softlimit));

	mutex_unlock(&rps->lock);

	rps->pm_iir = 0;
	if (intel_rps_has_interrupts(rps))
		rps_enable_interrupts(rps);
	if (intel_rps_uses_timer(rps))
		rps_start_timer(rps);

	if (IS_GEN(rps_to_i915(rps), 5))
		gen5_rps_update(rps);
}
void intel_rps_park(struct intel_rps *rps)
{
	int adj;

	if (!intel_rps_clear_active(rps))
		return;

	if (intel_rps_uses_timer(rps))
		rps_stop_timer(rps);
	if (intel_rps_has_interrupts(rps))
		rps_disable_interrupts(rps);

	if (rps->last_freq <= rps->idle_freq)
		return;

	/*
	 * The punit delays the write of the frequency and voltage until it
	 * determines the GPU is awake. During normal usage we don't want to
	 * waste power changing the frequency if the GPU is sleeping (rc6).
	 * However, the GPU and driver is now idle and we do not want to delay
	 * switching to minimum voltage (reducing power whilst idle) as we do
	 * not expect to be woken in the near future and so must flush the
	 * change by waking the device.
	 *
	 * We choose to take the media powerwell (either would do to trick the
	 * punit into committing the voltage change) as that takes a lot less
	 * power than the render powerwell.
	 */
	intel_uncore_forcewake_get(rps_to_uncore(rps), FORCEWAKE_MEDIA);
	rps_set(rps, rps->idle_freq, false);
	intel_uncore_forcewake_put(rps_to_uncore(rps), FORCEWAKE_MEDIA);

	/*
	 * Since we will try and restart from the previously requested
	 * frequency on unparking, treat this idle point as a downclock
	 * interrupt and reduce the frequency for resume. If we park/unpark
	 * more frequently than the rps worker can run, we will not respond
	 * to any EI and never see a change in frequency.
	 *
	 * (Note we accommodate Cherryview's limitation of only using an
	 * even bin by applying it to all.)
	 */
	if (rps->last_adj < 0)
		adj = rps->last_adj;
	else /* CHV needs even encode values */
		adj = -2;
	rps->last_adj = adj;
	rps->cur_freq = max_t(int, rps->cur_freq + adj, rps->min_freq);
	if (rps->cur_freq < rps->efficient_freq) {
		rps->cur_freq = rps->efficient_freq;
		rps->last_adj = 0;
	}

	GT_TRACE(rps_to_gt(rps), "park:%x\n", rps->cur_freq);
}
void intel_rps_boost(struct i915_request *rq)
{
	struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps;
	unsigned long flags;

	if (i915_request_signaled(rq) || !intel_rps_is_active(rps))
		return;

	/* Serializes with i915_request_retire() */
	spin_lock_irqsave(&rq->lock, flags);
	if (!i915_request_has_waitboost(rq) &&
	    !dma_fence_is_signaled_locked(&rq->fence)) {
		set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);

		GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n",
			 rq->fence.context, rq->fence.seqno);

		if (!atomic_fetch_inc(&rps->num_waiters) &&
		    READ_ONCE(rps->cur_freq) < rps->boost_freq)
			schedule_work(&rps->work);

		atomic_inc(&rps->boosts);
	}
	spin_unlock_irqrestore(&rq->lock, flags);
}
int intel_rps_set(struct intel_rps *rps, u8 val)
{
	int err;

	lockdep_assert_held(&rps->lock);
	GEM_BUG_ON(val > rps->max_freq);
	GEM_BUG_ON(val < rps->min_freq);

	if (intel_rps_is_active(rps)) {
		err = rps_set(rps, val, true);
		if (err)
			return err;

		/*
		 * Make sure we continue to get interrupts
		 * until we hit the minimum or maximum frequencies.
		 */
		if (intel_rps_has_interrupts(rps)) {
			struct intel_uncore *uncore = rps_to_uncore(rps);

			set(uncore,
			    GEN6_RP_INTERRUPT_LIMITS, rps_limits(rps, val));

			set(uncore, GEN6_PMINTRMSK, rps_pm_mask(rps, val));
		}
	}

	rps->cur_freq = val;
	return 0;
}
static void gen6_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);

	/* All of these values are in units of 50MHz */

	/* static values from HW: RP0 > RP1 > RPn (min_freq) */
	if (IS_GEN9_LP(i915)) {
		u32 rp_state_cap = intel_uncore_read(uncore, BXT_RP_STATE_CAP);

		rps->rp0_freq = (rp_state_cap >> 16) & 0xff;
		rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
		rps->min_freq = (rp_state_cap >> 0) & 0xff;
	} else {
		u32 rp_state_cap = intel_uncore_read(uncore, GEN6_RP_STATE_CAP);

		rps->rp0_freq = (rp_state_cap >> 0) & 0xff;
		rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
		rps->min_freq = (rp_state_cap >> 16) & 0xff;
	}

	/* hw_max = RP0 until we check for overclocking */
	rps->max_freq = rps->rp0_freq;

	rps->efficient_freq = rps->rp1_freq;
	if (IS_HASWELL(i915) || IS_BROADWELL(i915) ||
	    IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) {
		u32 ddcc_status = 0;

		if (sandybridge_pcode_read(i915,
					   HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
					   &ddcc_status, NULL) == 0)
			rps->efficient_freq =
				clamp_t(u8,
					(ddcc_status >> 8) & 0xff,
					rps->min_freq,
					rps->max_freq);
	}

	if (IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) {
		/* Store the frequency values in 16.66 MHZ units, which is
		 * the natural hardware unit for SKL
		 */
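		/*
		 * Added example: after this scaling an internal value of 48
		 * reads back as 48 * 50 / 3 = 800 MHz via intel_gpu_freq().
		 */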
		rps->rp0_freq *= GEN9_FREQ_SCALER;
		rps->rp1_freq *= GEN9_FREQ_SCALER;
		rps->min_freq *= GEN9_FREQ_SCALER;
		rps->max_freq *= GEN9_FREQ_SCALER;
		rps->efficient_freq *= GEN9_FREQ_SCALER;
	}
}
static bool rps_reset(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	/* force a reset */
	rps->power.mode = -1;
	rps->last_freq = -1;

	if (rps_set(rps, rps->min_freq, true)) {
		drm_err(&i915->drm, "Failed to reset RPS to initial values\n");
		return false;
	}

	rps->cur_freq = rps->min_freq;
	return true;
}
/* See the Gen9_GT_PM_Programming_Guide doc for the below */
static bool gen9_rps_enable(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);
	struct intel_uncore *uncore = gt->uncore;

	/* Program defaults and thresholds for RPS */
	if (IS_GEN(gt->i915, 9))
		intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
				      GEN9_FREQUENCY(rps->rp1_freq));

	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 0xa);

	rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD;

	return rps_reset(rps);
}

static bool gen8_rps_enable(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);

	intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
			      HSW_FREQUENCY(rps->rp1_freq));

	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);

	rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD;

	return rps_reset(rps);
}

static bool gen6_rps_enable(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);

	/* Power down if completely idle for over 50ms */
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 50000);
	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);

	rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
			  GEN6_PM_RP_DOWN_THRESHOLD |
			  GEN6_PM_RP_DOWN_TIMEOUT);

	return rps_reset(rps);
}
static int chv_rps_max_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_gt *gt = rps_to_gt(rps);
	u32 val;

	val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE);

	switch (gt->info.sseu.eu_total) {
	case 8:
		/* (2 * 4) config */
		val >>= FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT;
		break;
	case 12:
		/* (2 * 6) config */
		val >>= FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT;
		break;
	case 16:
		/* (2 * 8) config */
	default:
		/* Setting (2 * 8) Min RP0 for any other combination */
		val >>= FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT;
		break;
	}

	return val & FB_GFX_FREQ_FUSE_MASK;
}

static int chv_rps_rpe_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	val = vlv_punit_read(i915, PUNIT_GPU_DUTYCYCLE_REG);
	val >>= PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT;

	return val & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
}

static int chv_rps_guar_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE);

	return val & FB_GFX_FREQ_FUSE_MASK;
}

static u32 chv_rps_min_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	val = vlv_punit_read(i915, FB_GFX_FMIN_AT_VMIN_FUSE);
	val >>= FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT;

	return val & FB_GFX_FREQ_FUSE_MASK;
}
static bool chv_rps_enable(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	/* 1: Program defaults and thresholds for RPS*/
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000);
	intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400);
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000);
	intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000);
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000);

	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);

	intel_uncore_write_fw(uncore, GEN6_RP_CONTROL,
			      GEN6_RP_MEDIA_HW_NORMAL_MODE |
			      GEN6_RP_MEDIA_IS_GFX |
			      GEN6_RP_ENABLE |
			      GEN6_RP_UP_BUSY_AVG |
			      GEN6_RP_DOWN_IDLE_AVG);

	rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
			  GEN6_PM_RP_DOWN_THRESHOLD |
			  GEN6_PM_RP_DOWN_TIMEOUT);

	/* Setting Fixed Bias */
	vlv_punit_get(i915);

	val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | CHV_BIAS_CPU_50_SOC_50;
	vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val);

	val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);

	vlv_punit_put(i915);

	/* RPS code assumes GPLL is used */
	drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
		      "GPLL not enabled\n");

	drm_dbg(&i915->drm, "GPLL enabled? %s\n", yesno(val & GPLLENABLE));
	drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val);

	return rps_reset(rps);
}
static int vlv_rps_guar_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val, rp1;

	val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp1 = val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK;
	rp1 >>= FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;

	return rp1;
}

static int vlv_rps_max_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val, rp0;

	val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
	/* Clamp to max */
	rp0 = min_t(u32, rp0, 0xea);

	return rp0;
}

static int vlv_rps_rpe_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val, rpe;

	val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
	rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
	val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
	rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;

	return rpe;
}

static int vlv_rps_min_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	val = vlv_punit_read(i915, PUNIT_REG_GPU_LFM) & 0xff;
	/*
	 * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
	 * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
	 * a BYT-M B0 the above register contains 0xbf. Moreover when setting
	 * a frequency Punit will not allow values below 0xc0. Clamp it 0xc0
	 * to make sure it matches what Punit accepts.
	 */
	return max_t(u32, val, 0xc0);
}
static bool vlv_rps_enable(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000);
	intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400);
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000);
	intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000);
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000);

	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);

	intel_uncore_write_fw(uncore, GEN6_RP_CONTROL,
			      GEN6_RP_MEDIA_TURBO |
			      GEN6_RP_MEDIA_HW_NORMAL_MODE |
			      GEN6_RP_MEDIA_IS_GFX |
			      GEN6_RP_ENABLE |
			      GEN6_RP_UP_BUSY_AVG |
			      GEN6_RP_DOWN_IDLE_CONT);

	/* WaGsvRC0ResidencyMethod:vlv */
	rps->pm_events = GEN6_PM_RP_UP_EI_EXPIRED;

	vlv_punit_get(i915);

	/* Setting Fixed Bias */
	val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | VLV_BIAS_CPU_125_SOC_875;
	vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val);

	val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);

	vlv_punit_put(i915);

	/* RPS code assumes GPLL is used */
	drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
		      "GPLL not enabled\n");

	drm_dbg(&i915->drm, "GPLL enabled? %s\n", yesno(val & GPLLENABLE));
	drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val);

	return rps_reset(rps);
}
static unsigned long __ips_gfx_val(struct intel_ips *ips)
{
	struct intel_rps *rps = container_of(ips, typeof(*rps), ips);
	struct intel_uncore *uncore = rps_to_uncore(rps);
	unsigned int t, state1, state2;
	u32 pxvid, ext_v;
	u64 corr, corr2;

	lockdep_assert_held(&mchdev_lock);

	pxvid = intel_uncore_read(uncore, PXVFREQ(rps->cur_freq));
	pxvid = (pxvid >> 24) & 0x7f;
	ext_v = pvid_to_extvid(rps_to_i915(rps), pxvid);

	state1 = ext_v;

	/* Revel in the empirically derived constants */

	/* Correction factor in 1/100000 units */
	t = ips_mch_val(uncore);
	if (t > 80)
		corr = t * 2349 + 135940;
	else if (t >= 50)
		corr = t * 964 + 29317;
	else /* < 50 */
		corr = t * 301 + 1004;

	corr = div_u64(corr * 150142 * state1, 10000) - 78642;
	corr2 = div_u64(corr, 100000) * ips->corr;

	state2 = div_u64(corr2 * state1, 10000);
	state2 /= 100; /* convert to mW */

	__gen5_ips_update(ips);

	return ips->gfx_power + state2;
}
static bool has_busy_stats(struct intel_rps *rps)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, rps_to_gt(rps), id) {
		if (!intel_engine_supports_stats(engine))
			return false;
	}

	return true;
}
void intel_rps_enable(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);
	bool enabled = false;

	if (!HAS_RPS(i915))
		return;

	intel_gt_check_clock_frequency(rps_to_gt(rps));

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
	if (rps->max_freq <= rps->min_freq)
		/* leave disabled, no room for dynamic reclocking */;
	else if (IS_CHERRYVIEW(i915))
		enabled = chv_rps_enable(rps);
	else if (IS_VALLEYVIEW(i915))
		enabled = vlv_rps_enable(rps);
	else if (INTEL_GEN(i915) >= 9)
		enabled = gen9_rps_enable(rps);
	else if (INTEL_GEN(i915) >= 8)
		enabled = gen8_rps_enable(rps);
	else if (INTEL_GEN(i915) >= 6)
		enabled = gen6_rps_enable(rps);
	else if (IS_IRONLAKE_M(i915))
		enabled = gen5_rps_enable(rps);
	else
		MISSING_CASE(INTEL_GEN(i915));
	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
	if (!enabled)
		return;

	GT_TRACE(rps_to_gt(rps),
		 "min:%x, max:%x, freq:[%d, %d]\n",
		 rps->min_freq, rps->max_freq,
		 intel_gpu_freq(rps, rps->min_freq),
		 intel_gpu_freq(rps, rps->max_freq));

	GEM_BUG_ON(rps->max_freq < rps->min_freq);
	GEM_BUG_ON(rps->idle_freq > rps->max_freq);

	GEM_BUG_ON(rps->efficient_freq < rps->min_freq);
	GEM_BUG_ON(rps->efficient_freq > rps->max_freq);

	if (has_busy_stats(rps))
		intel_rps_set_timer(rps);
	else if (INTEL_GEN(i915) >= 6)
		intel_rps_set_interrupts(rps);
	else
		/* Ironlake currently uses intel_ips.ko */ {}

	intel_rps_set_enabled(rps);
}
static void gen6_rps_disable(struct intel_rps *rps)
{
	set(rps_to_uncore(rps), GEN6_RP_CONTROL, 0);
}

void intel_rps_disable(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	intel_rps_clear_enabled(rps);
	intel_rps_clear_interrupts(rps);
	intel_rps_clear_timer(rps);

	if (INTEL_GEN(i915) >= 6)
		gen6_rps_disable(rps);
	else if (IS_IRONLAKE_M(i915))
		gen5_rps_disable(rps);
}
static int byt_gpu_freq(struct intel_rps *rps, int val)
{
	/*
	 * N = val - 0xb7
	 * Slow = Fast = GPLL ref * N
	 */
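	/*
	 * Added illustration, assuming a GPLL reference of 19200 kHz:
	 * opcode 0xc0 gives N = 9 and therefore roughly 173 MHz.
	 */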
	return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * (val - 0xb7), 1000);
}

static int byt_freq_opcode(struct intel_rps *rps, int val)
{
	return DIV_ROUND_CLOSEST(1000 * val, rps->gpll_ref_freq) + 0xb7;
}

static int chv_gpu_freq(struct intel_rps *rps, int val)
{
	/*
	 * N = val / 2
	 * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
	 */
	return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * val, 2 * 2 * 1000);
}

static int chv_freq_opcode(struct intel_rps *rps, int val)
{
	/* CHV needs even values */
	return DIV_ROUND_CLOSEST(2 * 1000 * val, rps->gpll_ref_freq) * 2;
}
int intel_gpu_freq(struct intel_rps *rps, int val)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	if (INTEL_GEN(i915) >= 9)
		return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
					 GEN9_FREQ_SCALER);
	else if (IS_CHERRYVIEW(i915))
		return chv_gpu_freq(rps, val);
	else if (IS_VALLEYVIEW(i915))
		return byt_gpu_freq(rps, val);
	else if (INTEL_GEN(i915) >= 6)
		return val * GT_FREQUENCY_MULTIPLIER;
	else
		return val;
}

int intel_freq_opcode(struct intel_rps *rps, int val)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	if (INTEL_GEN(i915) >= 9)
		return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
					 GT_FREQUENCY_MULTIPLIER);
	else if (IS_CHERRYVIEW(i915))
		return chv_freq_opcode(rps, val);
	else if (IS_VALLEYVIEW(i915))
		return byt_freq_opcode(rps, val);
	else if (INTEL_GEN(i915) >= 6)
		return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
	else
		return val;
}
static void vlv_init_gpll_ref_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	rps->gpll_ref_freq =
		vlv_get_cck_clock(i915, "GPLL ref",
				  CCK_GPLL_CLOCK_CONTROL,
				  i915->czclk_freq);

	drm_dbg(&i915->drm, "GPLL reference freq: %d kHz\n",
		rps->gpll_ref_freq);
}
static void vlv_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	vlv_iosf_sb_get(i915,
			BIT(VLV_IOSF_SB_PUNIT) |
			BIT(VLV_IOSF_SB_NC) |
			BIT(VLV_IOSF_SB_CCK));

	vlv_init_gpll_ref_freq(rps);

	val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
	switch ((val >> 6) & 3) {
	case 0:
	case 1:
		i915->mem_freq = 800;
		break;
	case 2:
		i915->mem_freq = 1066;
		break;
	case 3:
		i915->mem_freq = 1333;
		break;
	}
	drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq);

	rps->max_freq = vlv_rps_max_freq(rps);
	rps->rp0_freq = rps->max_freq;
	drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->max_freq), rps->max_freq);

	rps->efficient_freq = vlv_rps_rpe_freq(rps);
	drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq);

	rps->rp1_freq = vlv_rps_guar_freq(rps);
	drm_dbg(&i915->drm, "RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq);

	rps->min_freq = vlv_rps_min_freq(rps);
	drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->min_freq), rps->min_freq);

	vlv_iosf_sb_put(i915,
			BIT(VLV_IOSF_SB_PUNIT) |
			BIT(VLV_IOSF_SB_NC) |
			BIT(VLV_IOSF_SB_CCK));
}
static void chv_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	vlv_iosf_sb_get(i915,
			BIT(VLV_IOSF_SB_PUNIT) |
			BIT(VLV_IOSF_SB_NC) |
			BIT(VLV_IOSF_SB_CCK));

	vlv_init_gpll_ref_freq(rps);

	val = vlv_cck_read(i915, CCK_FUSE_REG);

	switch ((val >> 2) & 0x7) {
	case 3:
		i915->mem_freq = 2000;
		break;
	default:
		i915->mem_freq = 1600;
		break;
	}
	drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq);

	rps->max_freq = chv_rps_max_freq(rps);
	rps->rp0_freq = rps->max_freq;
	drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->max_freq), rps->max_freq);

	rps->efficient_freq = chv_rps_rpe_freq(rps);
	drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq);

	rps->rp1_freq = chv_rps_guar_freq(rps);
	drm_dbg(&i915->drm, "RP1(Guar) GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq);

	rps->min_freq = chv_rps_min_freq(rps);
	drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->min_freq), rps->min_freq);

	vlv_iosf_sb_put(i915,
			BIT(VLV_IOSF_SB_PUNIT) |
			BIT(VLV_IOSF_SB_NC) |
			BIT(VLV_IOSF_SB_CCK));

	drm_WARN_ONCE(&i915->drm, (rps->max_freq | rps->efficient_freq |
				   rps->rp1_freq | rps->min_freq) & 1,
		      "Odd GPU freq values\n");
}
static void vlv_c0_read(struct intel_uncore *uncore, struct intel_rps_ei *ei)
{
	ei->ktime = ktime_get_raw();
	ei->render_c0 = intel_uncore_read(uncore, VLV_RENDER_C0_COUNT);
	ei->media_c0 = intel_uncore_read(uncore, VLV_MEDIA_C0_COUNT);
}
static u32 vlv_wa_c0_ei(struct intel_rps *rps, u32 pm_iir)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	const struct intel_rps_ei *prev = &rps->ei;
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
		return 0;

	vlv_c0_read(uncore, &now);

	if (prev->ktime) {
		u64 time, c0;
		u32 render, media;

		time = ktime_us_delta(now.ktime, prev->ktime);

		time *= rps_to_i915(rps)->czclk_freq;

		/* Workload can be split between render + media,
		 * e.g. SwapBuffers being blitted in X after being rendered in
		 * mesa. To account for this we need to combine both engines
		 * into our activity counter.
		 */
		render = now.render_c0 - prev->render_c0;
		media = now.media_c0 - prev->media_c0;
		c0 = max(render, media);
		c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */
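
		/*
		 * Added note: both sides of the comparison below are scaled
		 * so that it effectively asks whether the busier engine was
		 * active for more than (up/down)_threshold% of the interval.
		 */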
		if (c0 > time * rps->power.up_threshold)
			events = GEN6_PM_RP_UP_THRESHOLD;
		else if (c0 < time * rps->power.down_threshold)
			events = GEN6_PM_RP_DOWN_THRESHOLD;
	}

	rps->ei = now;
	return events;
}
static void rps_work(struct work_struct *work)
{
	struct intel_rps *rps = container_of(work, typeof(*rps), work);
	struct intel_gt *gt = rps_to_gt(rps);
	struct drm_i915_private *i915 = rps_to_i915(rps);
	bool client_boost = false;
	int new_freq, adj, min, max;
	u32 pm_iir = 0;

	spin_lock_irq(&gt->irq_lock);
	pm_iir = fetch_and_zero(&rps->pm_iir) & rps->pm_events;
	client_boost = atomic_read(&rps->num_waiters);
	spin_unlock_irq(&gt->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	if (!pm_iir && !client_boost)
		goto out;

	mutex_lock(&rps->lock);
	if (!intel_rps_is_active(rps)) {
		mutex_unlock(&rps->lock);
		return;
	}

	pm_iir |= vlv_wa_c0_ei(rps, pm_iir);

	adj = rps->last_adj;
	new_freq = rps->cur_freq;
	min = rps->min_freq_softlimit;
	max = rps->max_freq_softlimit;
	if (client_boost)
		max = rps->max_freq;

	GT_TRACE(gt,
		 "pm_iir:%x, client_boost:%s, last:%d, cur:%x, min:%x, max:%x\n",
		 pm_iir, yesno(client_boost),
		 adj, new_freq, min, max);
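
	/*
	 * Added note: the adjustment ladder below doubles the step on each
	 * consecutive up/down event, so sustained load ramps the frequency
	 * progressively faster rather than one bin per interrupt.
	 */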
	if (client_boost && new_freq < rps->boost_freq) {
		new_freq = rps->boost_freq;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(gt->i915) ? 2 : 1;

		if (new_freq >= rps->max_freq_softlimit)
			adj = 0;
	} else if (client_boost) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (rps->cur_freq > rps->efficient_freq)
			new_freq = rps->efficient_freq;
		else if (rps->cur_freq > rps->min_freq_softlimit)
			new_freq = rps->min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(gt->i915) ? -2 : -1;

		if (new_freq <= rps->min_freq_softlimit)
			adj = 0;
	} else { /* unknown event */
		adj = 0;
	}

	/*
	 * sysfs frequency limits may have snuck in while
	 * servicing the interrupt
	 */
	new_freq += adj;
	new_freq = clamp_t(int, new_freq, min, max);

	if (intel_rps_set(rps, new_freq)) {
		drm_dbg(&i915->drm, "Failed to set new GPU frequency\n");
		adj = 0;
	}
	rps->last_adj = adj;

	mutex_unlock(&rps->lock);

out:
	spin_lock_irq(&gt->irq_lock);
	gen6_gt_pm_unmask_irq(gt, rps->pm_events);
	spin_unlock_irq(&gt->irq_lock);
}
void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
{
	struct intel_gt *gt = rps_to_gt(rps);
	const u32 events = rps->pm_events & pm_iir;

	lockdep_assert_held(&gt->irq_lock);

	if (unlikely(!events))
		return;

	GT_TRACE(gt, "irq events:%x\n", events);

	gen6_gt_pm_mask_irq(gt, events);

	rps->pm_iir |= events;
	schedule_work(&rps->work);
}

void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
{
	struct intel_gt *gt = rps_to_gt(rps);
	u32 events;

	events = pm_iir & rps->pm_events;
	if (events) {
		spin_lock(&gt->irq_lock);

		GT_TRACE(gt, "irq events:%x\n", events);

		gen6_gt_pm_mask_irq(gt, events);
		rps->pm_iir |= events;

		schedule_work(&rps->work);
		spin_unlock(&gt->irq_lock);
	}

	if (INTEL_GEN(gt->i915) >= 8)
		return;

	if (pm_iir & PM_VEBOX_USER_INTERRUPT)
		intel_engine_signal_breadcrumbs(gt->engine[VECS0]);

	if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
		DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
}
void gen5_rps_irq_handler(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_freq;

	spin_lock(&mchdev_lock);

	intel_uncore_write16(uncore,
			     MEMINTRSTS,
			     intel_uncore_read(uncore, MEMINTRSTS));

	intel_uncore_write16(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG);
	busy_down = intel_uncore_read(uncore, RCPREVBSYTDNAVG);
	max_avg = intel_uncore_read(uncore, RCBMAXAVG);
	min_avg = intel_uncore_read(uncore, RCBMINAVG);

	/* Handle RCS change request from hw */
	new_freq = rps->cur_freq;
	if (busy_up > max_avg)
		new_freq++;
	else if (busy_down < min_avg)
		new_freq--;
	new_freq = clamp(new_freq,
			 rps->min_freq_softlimit,
			 rps->max_freq_softlimit);

	if (new_freq != rps->cur_freq && gen5_rps_set(rps, new_freq))
		rps->cur_freq = new_freq;

	spin_unlock(&mchdev_lock);
}
void intel_rps_init_early(struct intel_rps *rps)
{
	mutex_init(&rps->lock);
	mutex_init(&rps->power.mutex);

	INIT_WORK(&rps->work, rps_work);
	timer_setup(&rps->timer, rps_timer, 0);

	atomic_set(&rps->num_waiters, 0);
}
void intel_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	if (IS_CHERRYVIEW(i915))
		chv_rps_init(rps);
	else if (IS_VALLEYVIEW(i915))
		vlv_rps_init(rps);
	else if (INTEL_GEN(i915) >= 6)
		gen6_rps_init(rps);
	else if (IS_IRONLAKE_M(i915))
		gen5_rps_init(rps);

	/* Derive initial user preferences/limits from the hardware limits */
	rps->max_freq_softlimit = rps->max_freq;
	rps->min_freq_softlimit = rps->min_freq;

	/* After setting max-softlimit, find the overclock max freq */
	if (IS_GEN(i915, 6) || IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) {
		u32 params = 0;

		sandybridge_pcode_read(i915, GEN6_READ_OC_PARAMS,
				       &params, NULL);
		if (params & BIT(31)) { /* OC supported */
			drm_dbg(&i915->drm,
				"Overclocking supported, max: %dMHz, overclock: %dMHz\n",
				(rps->max_freq & 0xff) * 50,
				(params & 0xff) * 50);
			rps->max_freq = params & 0xff;
		}
	}

	/* Finally allow us to boost to max by default */
	rps->boost_freq = rps->max_freq;
	rps->idle_freq = rps->min_freq;

	/* Start in the middle, from here we will autotune based on workload */
	rps->cur_freq = rps->efficient_freq;

	rps->pm_intrmsk_mbz = 0;

	/*
	 * SNB,IVB,HSW can while VLV,CHV may hard hang on looping batchbuffer
	 * if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_GEN(i915) <= 7)
		rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_GEN(i915) >= 8 && INTEL_GEN(i915) < 11)
		rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
}
void intel_rps_sanitize(struct intel_rps *rps)
{
	if (INTEL_GEN(rps_to_i915(rps)) >= 6)
		rps_disable_interrupts(rps);
}
u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 cagf;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		cagf = (rpstat >> 8) & 0xff;
	else if (INTEL_GEN(i915) >= 9)
		cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
	else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
		cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
	else if (INTEL_GEN(i915) >= 6)
		cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
	else
		cagf = gen5_invert_freq(rps, (rpstat & MEMSTAT_PSTATE_MASK) >>
					     MEMSTAT_PSTATE_SHIFT);

	return cagf;
}

static u32 read_cagf(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u32 freq;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
		vlv_punit_get(i915);
		freq = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
		vlv_punit_put(i915);
	} else if (INTEL_GEN(i915) >= 6) {
		freq = intel_uncore_read(uncore, GEN6_RPSTAT1);
	} else {
		freq = intel_uncore_read(uncore, MEMSTAT_ILK);
	}

	return intel_rps_get_cagf(rps, freq);
}
u32 intel_rps_read_actual_frequency(struct intel_rps *rps)
{
	struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm;
	intel_wakeref_t wakeref;
	u32 freq = 0;

	with_intel_runtime_pm_if_in_use(rpm, wakeref)
		freq = intel_gpu_freq(rps, read_cagf(rps));

	return freq;
}
/* External interface for intel_ips.ko */

static struct drm_i915_private __rcu *ips_mchdev;

/*
 * Tells the intel_ips driver that the i915 driver is now loaded, if
 * IPS got loaded first.
 *
 * This awkward dance is so that neither module has to depend on the
 * other in order for IPS to do the appropriate communication of
 * GPU turbo limits to i915.
 */
static void
ips_ping_for_i915_load(void)
{
	void (*link)(void);

	link = symbol_get(ips_link_to_i915_driver);
	if (link) {
		link();
		symbol_put(ips_link_to_i915_driver);
	}
}
void intel_rps_driver_register(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	/*
	 * We only register the i915 ips part with intel-ips once everything is
	 * set up, to avoid intel-ips sneaking in and reading bogus values.
	 */
	if (IS_GEN(gt->i915, 5)) {
		GEM_BUG_ON(ips_mchdev);
		rcu_assign_pointer(ips_mchdev, gt->i915);
		ips_ping_for_i915_load();
	}
}

void intel_rps_driver_unregister(struct intel_rps *rps)
{
	if (rcu_access_pointer(ips_mchdev) == rps_to_i915(rps))
		rcu_assign_pointer(ips_mchdev, NULL);
}
static struct drm_i915_private *mchdev_get(void)
{
	struct drm_i915_private *i915;

	rcu_read_lock();
	i915 = rcu_dereference(ips_mchdev);
	if (i915 && !kref_get_unless_zero(&i915->drm.ref))
		i915 = NULL;
	rcu_read_unlock();

	return i915;
}
/**
 * i915_read_mch_val - return value for IPS use
 *
 * Calculate and return a value for the IPS driver to use when deciding whether
 * we have thermal and power headroom to increase CPU or GPU power budget.
 */
unsigned long i915_read_mch_val(void)
{
	struct drm_i915_private *i915;
	unsigned long chipset_val = 0;
	unsigned long graphics_val = 0;
	intel_wakeref_t wakeref;

	i915 = mchdev_get();
	if (!i915)
		return 0;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		struct intel_ips *ips = &i915->gt.rps.ips;

		spin_lock_irq(&mchdev_lock);
		chipset_val = __ips_chipset_val(ips);
		graphics_val = __ips_gfx_val(ips);
		spin_unlock_irq(&mchdev_lock);
	}

	drm_dev_put(&i915->drm);
	return chipset_val + graphics_val;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);
/**
 * i915_gpu_raise - raise GPU frequency limit
 *
 * Raise the limit; IPS indicates we have thermal headroom.
 */
bool i915_gpu_raise(void)
{
	struct drm_i915_private *i915;
	struct intel_rps *rps;

	i915 = mchdev_get();
	if (!i915)
		return false;

	rps = &i915->gt.rps;

	spin_lock_irq(&mchdev_lock);
	if (rps->max_freq_softlimit < rps->max_freq)
		rps->max_freq_softlimit++;
	spin_unlock_irq(&mchdev_lock);

	drm_dev_put(&i915->drm);
	return true;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);
/**
 * i915_gpu_lower - lower GPU frequency limit
 *
 * IPS indicates we're close to a thermal limit, so throttle back the GPU
 * frequency maximum.
 */
bool i915_gpu_lower(void)
{
	struct drm_i915_private *i915;
	struct intel_rps *rps;

	i915 = mchdev_get();
	if (!i915)
		return false;

	rps = &i915->gt.rps;

	spin_lock_irq(&mchdev_lock);
	if (rps->max_freq_softlimit > rps->min_freq)
		rps->max_freq_softlimit--;
	spin_unlock_irq(&mchdev_lock);

	drm_dev_put(&i915->drm);
	return true;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);
/**
 * i915_gpu_busy - indicate GPU business to IPS
 *
 * Tell the IPS driver whether or not the GPU is busy.
 */
bool i915_gpu_busy(void)
{
	struct drm_i915_private *i915;
	bool ret;

	i915 = mchdev_get();
	if (!i915)
		return false;

	ret = i915->gt.awake;

	drm_dev_put(&i915->drm);
	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);
/**
 * i915_gpu_turbo_disable - disable graphics turbo
 *
 * Disable graphics turbo by resetting the max frequency and setting the
 * current frequency to the default.
 */
bool i915_gpu_turbo_disable(void)
{
	struct drm_i915_private *i915;
	struct intel_rps *rps;
	bool ret;

	i915 = mchdev_get();
	if (!i915)
		return false;

	rps = &i915->gt.rps;

	spin_lock_irq(&mchdev_lock);
	rps->max_freq_softlimit = rps->min_freq;
	ret = gen5_rps_set(&i915->gt.rps, rps->min_freq);
	spin_unlock_irq(&mchdev_lock);

	drm_dev_put(&i915->drm);
	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_rps.c"
#endif