// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <drm/drm_atomic_state_helper.h>

#include "intel_atomic.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_display_types.h"
#include "intel_pm.h"
#include "intel_sideband.h"
/* Parameters for Qclk Geyserville (QGV) */
struct intel_qgv_point {
	u16 dclk, t_rp, t_rdpre, t_rc, t_ras, t_rcd;
};
struct intel_qgv_info {
	struct intel_qgv_point points[I915_NUM_QGV_POINTS];
	u8 num_points;
	u8 num_channels;
	u8 t_bl;
	enum intel_dram_type dram_type;
};
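/*
 * The QGV point values above come straight from pcode: dclk is in units of
 * 16.666 MHz (see icl_calc_bw()), and the t_* fields are DRAM timing
 * parameters, presumably expressed in dclk cycles.
 */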
static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv,
					  struct intel_qgv_info *qi)
{
	u32 val = 0;
	int ret;

	ret = sandybridge_pcode_read(dev_priv,
				     ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
				     ICL_PCODE_MEM_SS_READ_GLOBAL_INFO,
				     &val, NULL);
	if (ret)
		return ret;

	if (IS_GEN(dev_priv, 12)) {
		switch (val & 0xf) {
		case 0:
			qi->dram_type = INTEL_DRAM_DDR4;
			break;
		case 3:
			qi->dram_type = INTEL_DRAM_LPDDR4;
			break;
		case 4:
			qi->dram_type = INTEL_DRAM_DDR3;
			break;
		case 5:
			qi->dram_type = INTEL_DRAM_LPDDR3;
			break;
		default:
			MISSING_CASE(val & 0xf);
			break;
		}
	} else if (IS_GEN(dev_priv, 11)) {
		switch (val & 0xf) {
		case 0:
			qi->dram_type = INTEL_DRAM_DDR4;
			break;
		case 1:
			qi->dram_type = INTEL_DRAM_DDR3;
			break;
		case 2:
			qi->dram_type = INTEL_DRAM_LPDDR3;
			break;
		case 3:
			qi->dram_type = INTEL_DRAM_LPDDR4;
			break;
		default:
			MISSING_CASE(val & 0xf);
			break;
		}
	} else {
		MISSING_CASE(INTEL_GEN(dev_priv));
		qi->dram_type = INTEL_DRAM_LPDDR3; /* Conservative default */
	}

	qi->num_channels = (val & 0xf0) >> 4;
	qi->num_points = (val & 0xf00) >> 8;

	if (IS_GEN(dev_priv, 12))
		qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 4 : 16;
	else if (IS_GEN(dev_priv, 11))
		qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 4 : 8;

	return 0;
}
static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
					 struct intel_qgv_point *sp,
					 int point)
{
	u32 val = 0, val2 = 0;
	int ret;

	ret = sandybridge_pcode_read(dev_priv,
				     ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
				     ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point),
				     &val, &val2);
	if (ret)
		return ret;

	sp->dclk = val & 0xffff;
	sp->t_rp = (val & 0xff0000) >> 16;
	sp->t_rcd = (val & 0xff000000) >> 24;

	sp->t_rdpre = val2 & 0xff;
	sp->t_ras = (val2 & 0xff00) >> 8;

	sp->t_rc = sp->t_rp + sp->t_ras;

	return 0;
}
int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
				  u32 points_mask)
{
	int ret;

	/* bspec says to keep retrying for at least 1 ms */
	ret = skl_pcode_request(dev_priv, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
				points_mask,
				ICL_PCODE_POINTS_RESTRICTED_MASK,
				ICL_PCODE_POINTS_RESTRICTED,
				1);

	if (ret < 0) {
		drm_err(&dev_priv->drm, "Failed to disable qgv points (%d)\n", ret);
		return ret;
	}

	return 0;
}
static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
			      struct intel_qgv_info *qi)
{
	int i, ret;

	ret = icl_pcode_read_mem_global_info(dev_priv, qi);
	if (ret)
		return ret;

	if (drm_WARN_ON(&dev_priv->drm,
			qi->num_points > ARRAY_SIZE(qi->points)))
		qi->num_points = ARRAY_SIZE(qi->points);

	for (i = 0; i < qi->num_points; i++) {
		struct intel_qgv_point *sp = &qi->points[i];

		ret = icl_pcode_read_qgv_point_info(dev_priv, sp, i);
		if (ret)
			return ret;

		drm_dbg_kms(&dev_priv->drm,
			    "QGV %d: DCLK=%d tRP=%d tRDPRE=%d tRAS=%d tRCD=%d tRC=%d\n",
			    i, sp->dclk, sp->t_rp, sp->t_rdpre, sp->t_ras,
			    sp->t_rcd, sp->t_rc);
	}

	return 0;
}
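/*
 * icl_calc_bw() effectively converts a rate of @num bytes every @den dclk
 * cycles into MB/s, with dclk given in units of 16.666 MHz. For example,
 * dclk = 48 (~800 MHz) and num/den = 16/1 gives
 * DIV_ROUND_CLOSEST(16 * 48 * 100, 6) = 12800, i.e. 16 bytes per clock at
 * 800 MHz is 12800 MB/s.
 */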
static int icl_calc_bw(int dclk, int num, int den)
{
	/* multiples of 16.666MHz (100/6) */
	return DIV_ROUND_CLOSEST(num * dclk * 100, den * 6);
}
static int icl_sagv_max_dclk(const struct intel_qgv_info *qi)
{
	u16 dclk = 0;
	int i;

	for (i = 0; i < qi->num_points; i++)
		dclk = max(dclk, qi->points[i].dclk);

	return dclk;
}
struct intel_sa_info {
	u16 displayrtids;
	u8 deburst, deprogbwlimit;
};
static const struct intel_sa_info icl_sa_info = {
	.deburst = 8,
	.deprogbwlimit = 25, /* GB/s */
	.displayrtids = 128,
};

static const struct intel_sa_info tgl_sa_info = {
	.deburst = 16,
	.deprogbwlimit = 34, /* GB/s */
	.displayrtids = 256,
};

static const struct intel_sa_info rkl_sa_info = {
	.deburst = 16,
	.deprogbwlimit = 20, /* GB/s */
	.displayrtids = 128,
};
static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
{
	struct intel_qgv_info qi = {};
	bool is_y_tile = true; /* assume y tile may be used */
	int num_channels;
	int deinterleave;
	int ipqdepth, ipqdepthpch;
	int dclk_max;
	int maxdebw;
	int i, ret;

	ret = icl_get_qgv_points(dev_priv, &qi);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to get memory subsystem information, ignoring bandwidth limits");
		return ret;
	}

	num_channels = qi.num_channels;

	deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);
	dclk_max = icl_sagv_max_dclk(&qi);

	ipqdepthpch = 16;

	maxdebw = min(sa->deprogbwlimit * 1000,
		      icl_calc_bw(dclk_max, 16, 1) * 6 / 10); /* 60% */
	ipqdepth = min(ipqdepthpch, sa->displayrtids / num_channels);

	for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) {
		struct intel_bw_info *bi = &dev_priv->max_bw[i];
		int clpchgroup;
		int j;

		clpchgroup = (sa->deburst * deinterleave / num_channels) << i;
		bi->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1;

		bi->num_qgv_points = qi.num_points;

		for (j = 0; j < qi.num_points; j++) {
			const struct intel_qgv_point *sp = &qi.points[j];
			int ct, bw;

			/*
			 * FIXME what is the logic behind the
			 * assumed burst length?
			 */
			ct = max_t(int, sp->t_rc, sp->t_rp + sp->t_rcd +
				   (clpchgroup - 1) * qi.t_bl + sp->t_rdpre);
			bw = icl_calc_bw(sp->dclk, clpchgroup * 32 * num_channels, ct);

			bi->deratedbw[j] = min(maxdebw,
					       bw * 9 / 10); /* 90% */

			drm_dbg_kms(&dev_priv->drm,
				    "BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
				    i, j, bi->num_planes, bi->deratedbw[j]);
		}

		if (bi->num_planes == 1)
			break;
	}

	/*
	 * In case SAGV is disabled in BIOS, we always get 1
	 * SAGV point, but we can't send PCode commands to restrict it
	 * as they would fail and be pointless anyway.
	 */
	if (qi.num_points == 1)
		dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
	else
		dev_priv->sagv_status = I915_SAGV_ENABLED;

	return 0;
}
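/*
 * The table built above thus appears to map a minimum active plane count
 * (bi->num_planes) to the usable bandwidth per QGV point: deratedbw[j] is
 * 90% of the theoretical peak at point j, further capped by the SA limit
 * (maxdebw).
 */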
static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
			       int num_planes, int qgv_point)
{
	int i;

	/*
	 * Let's return max bw for 0 planes
	 */
	num_planes = max(1, num_planes);

	for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) {
		const struct intel_bw_info *bi =
			&dev_priv->max_bw[i];

		/*
		 * Pcode will not expose all QGV points when
		 * SAGV is forced to off/min/med/max.
		 */
		if (qgv_point >= bi->num_qgv_points)
			return UINT_MAX;

		if (num_planes >= bi->num_planes)
			return bi->deratedbw[qgv_point];
	}

	return 0;
}
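/*
 * Example: with three active planes and qgv_point 1 the loop walks the table
 * until it hits the first entry whose num_planes is <= 3 and returns that
 * entry's deratedbw[1]; a point index beyond what pcode exposed is treated
 * as unlimited (UINT_MAX).
 */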
void intel_bw_init_hw(struct drm_i915_private *dev_priv)
{
	if (!HAS_DISPLAY(dev_priv))
		return;

	if (IS_ROCKETLAKE(dev_priv))
		icl_get_bw_info(dev_priv, &rkl_sa_info);
	else if (IS_GEN(dev_priv, 12))
		icl_get_bw_info(dev_priv, &tgl_sa_info);
	else if (IS_GEN(dev_priv, 11))
		icl_get_bw_info(dev_priv, &icl_sa_info);
}
static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_state *crtc_state)
{
	/*
	 * We assume cursors are small enough
	 * to not cause bandwidth problems.
	 */
	return hweight8(crtc_state->active_planes & ~BIT(PLANE_CURSOR));
}
static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	unsigned int data_rate = 0;
	enum plane_id plane_id;

	for_each_plane_id_on_crtc(crtc, plane_id) {
		/*
		 * We assume cursors are small enough
		 * to not cause bandwidth problems.
		 */
		if (plane_id == PLANE_CURSOR)
			continue;

		data_rate += crtc_state->data_rate[plane_id];
	}

	return data_rate;
}
void intel_bw_crtc_update(struct intel_bw_state *bw_state,
			  const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);

	bw_state->data_rate[crtc->pipe] =
		intel_bw_crtc_data_rate(crtc_state);
	bw_state->num_active_planes[crtc->pipe] =
		intel_bw_crtc_num_active_planes(crtc_state);

	drm_dbg_kms(&i915->drm, "pipe %c data rate %u num active planes %u\n",
		    pipe_name(crtc->pipe),
		    bw_state->data_rate[crtc->pipe],
		    bw_state->num_active_planes[crtc->pipe]);
}
static unsigned int intel_bw_num_active_planes(struct drm_i915_private *dev_priv,
					       const struct intel_bw_state *bw_state)
{
	unsigned int num_active_planes = 0;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		num_active_planes += bw_state->num_active_planes[pipe];

	return num_active_planes;
}
static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv,
				       const struct intel_bw_state *bw_state)
{
	unsigned int data_rate = 0;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		data_rate += bw_state->data_rate[pipe];

	return data_rate;
}
struct intel_bw_state *
intel_atomic_get_old_bw_state(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_global_state *bw_state;

	bw_state = intel_atomic_get_old_global_obj_state(state, &dev_priv->bw_obj);

	return to_intel_bw_state(bw_state);
}
struct intel_bw_state *
intel_atomic_get_new_bw_state(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_global_state *bw_state;

	bw_state = intel_atomic_get_new_global_obj_state(state, &dev_priv->bw_obj);

	return to_intel_bw_state(bw_state);
}
struct intel_bw_state *
intel_atomic_get_bw_state(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_global_state *bw_state;

	bw_state = intel_atomic_get_global_obj_state(state, &dev_priv->bw_obj);
	if (IS_ERR(bw_state))
		return ERR_CAST(bw_state);

	return to_intel_bw_state(bw_state);
}
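/*
 * Of the three lookup helpers above, only intel_atomic_get_bw_state() can
 * fail: it pulls the global bw object into the atomic state on first use,
 * which may require duplicating its state, while the _old/_new variants only
 * return a state that has already been added.
 */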
int skl_bw_calc_min_cdclk(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_bw_state *new_bw_state = NULL;
	struct intel_bw_state *old_bw_state = NULL;
	const struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int max_bw = 0;
	int slice_id;
	enum pipe pipe;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		enum plane_id plane_id;
		struct intel_dbuf_bw *crtc_bw;

		new_bw_state = intel_atomic_get_bw_state(state);
		if (IS_ERR(new_bw_state))
			return PTR_ERR(new_bw_state);

		old_bw_state = intel_atomic_get_old_bw_state(state);

		crtc_bw = &new_bw_state->dbuf_bw[crtc->pipe];

		memset(&crtc_bw->used_bw, 0, sizeof(crtc_bw->used_bw));

		if (!crtc_state->hw.active)
			continue;

		for_each_plane_id_on_crtc(crtc, plane_id) {
			const struct skl_ddb_entry *plane_alloc =
				&crtc_state->wm.skl.plane_ddb_y[plane_id];
			const struct skl_ddb_entry *uv_plane_alloc =
				&crtc_state->wm.skl.plane_ddb_uv[plane_id];
			unsigned int data_rate = crtc_state->data_rate[plane_id];
			unsigned int dbuf_mask = 0;

			dbuf_mask |= skl_ddb_dbuf_slice_mask(dev_priv, plane_alloc);
			dbuf_mask |= skl_ddb_dbuf_slice_mask(dev_priv, uv_plane_alloc);

			/*
			 * FIXME: To calculate this more properly we probably
			 * need to split the per-plane data_rate into
			 * data_rate_y and data_rate_uv for multi-planar
			 * formats, so that it doesn't get accounted twice if
			 * the two halves happen to reside on different slices.
			 * However, for pre-icl this works anyway because we
			 * have only a single slice, and for icl+ the uv plane
			 * has a non-zero data rate.
			 * So in the worst case these calculations are a bit
			 * pessimistic, which shouldn't pose any significant
			 * problem.
			 */
			for_each_dbuf_slice_in_mask(slice_id, dbuf_mask)
				crtc_bw->used_bw[slice_id] += data_rate;
		}
	}

	if (!old_bw_state)
		return 0;

	for_each_pipe(dev_priv, pipe) {
		struct intel_dbuf_bw *crtc_bw;

		crtc_bw = &new_bw_state->dbuf_bw[pipe];

		for_each_dbuf_slice(slice_id) {
			/*
			 * Current experimental observations show that contrary
			 * to BSpec we get underruns once we exceed 64 * CDCLK
			 * for slices in total.
			 * As a temporary measure, in order not to keep CDCLK
			 * bumped up all the time, we calculate CDCLK according
			 * to this formula for the overall bw consumed by the
			 * slices.
			 */
			max_bw += crtc_bw->used_bw[slice_id];
		}
	}

	new_bw_state->min_cdclk = max_bw / 64;

	if (new_bw_state->min_cdclk != old_bw_state->min_cdclk) {
		int ret = intel_atomic_lock_global_state(&new_bw_state->base);

		if (ret)
			return ret;
	}

	return 0;
}
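/*
 * Worked example of the 64 * CDCLK rule above: if all slices together carry
 * a max_bw of 38,400,000 (in the same units as crtc_state->data_rate), the
 * required min_cdclk comes out as 38,400,000 / 64 = 600,000, i.e. 600 MHz
 * assuming the usual kHz units of the cdclk code.
 */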
int intel_bw_calc_min_cdclk(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_bw_state *new_bw_state = NULL;
	struct intel_bw_state *old_bw_state = NULL;
	const struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int min_cdclk = 0;
	enum pipe pipe;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		new_bw_state = intel_atomic_get_bw_state(state);
		if (IS_ERR(new_bw_state))
			return PTR_ERR(new_bw_state);

		old_bw_state = intel_atomic_get_old_bw_state(state);
	}

	if (!old_bw_state)
		return 0;

	for_each_pipe(dev_priv, pipe) {
		struct intel_cdclk_state *cdclk_state;

		cdclk_state = intel_atomic_get_new_cdclk_state(state);
		if (!cdclk_state)
			return 0;

		min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk);
	}

	new_bw_state->min_cdclk = min_cdclk;

	if (new_bw_state->min_cdclk != old_bw_state->min_cdclk) {
		int ret = intel_atomic_lock_global_state(&new_bw_state->base);

		if (ret)
			return ret;
	}

	return 0;
}
int intel_bw_atomic_check(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_bw_state *new_bw_state = NULL;
	const struct intel_bw_state *old_bw_state = NULL;
	unsigned int data_rate;
	unsigned int num_active_planes;
	struct intel_crtc *crtc;
	int i, ret;
	u32 allowed_points = 0;
	unsigned int max_bw_point = 0, max_bw = 0;
	unsigned int num_qgv_points = dev_priv->max_bw[0].num_qgv_points;
	u32 mask = (1 << num_qgv_points) - 1;

	/* FIXME earlier gens need some checks too */
	if (INTEL_GEN(dev_priv) < 11)
		return 0;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		unsigned int old_data_rate =
			intel_bw_crtc_data_rate(old_crtc_state);
		unsigned int new_data_rate =
			intel_bw_crtc_data_rate(new_crtc_state);
		unsigned int old_active_planes =
			intel_bw_crtc_num_active_planes(old_crtc_state);
		unsigned int new_active_planes =
			intel_bw_crtc_num_active_planes(new_crtc_state);

		/*
		 * Avoid locking the bw state when
		 * nothing significant has changed.
		 */
		if (old_data_rate == new_data_rate &&
		    old_active_planes == new_active_planes)
			continue;

		new_bw_state = intel_atomic_get_bw_state(state);
		if (IS_ERR(new_bw_state))
			return PTR_ERR(new_bw_state);

		new_bw_state->data_rate[crtc->pipe] = new_data_rate;
		new_bw_state->num_active_planes[crtc->pipe] = new_active_planes;

		drm_dbg_kms(&dev_priv->drm,
			    "pipe %c data rate %u num active planes %u\n",
			    pipe_name(crtc->pipe),
			    new_bw_state->data_rate[crtc->pipe],
			    new_bw_state->num_active_planes[crtc->pipe]);
	}

	if (!new_bw_state)
		return 0;

	ret = intel_atomic_lock_global_state(&new_bw_state->base);
	if (ret)
		return ret;

	data_rate = intel_bw_data_rate(dev_priv, new_bw_state);
	data_rate = DIV_ROUND_UP(data_rate, 1000);

	num_active_planes = intel_bw_num_active_planes(dev_priv, new_bw_state);

	for (i = 0; i < num_qgv_points; i++) {
		unsigned int max_data_rate;

		max_data_rate = icl_max_bw(dev_priv, num_active_planes, i);
		/*
		 * We need to know which qgv point gives us
		 * maximum bandwidth in order to disable SAGV
		 * if we find that we exceed SAGV block time
		 * with watermarks. By that moment we already
		 * have those, as they are calculated earlier in
		 * intel_atomic_check().
		 */
		if (max_data_rate > max_bw) {
			max_bw_point = i;
			max_bw = max_data_rate;
		}
		if (max_data_rate >= data_rate)
			allowed_points |= BIT(i);
		drm_dbg_kms(&dev_priv->drm, "QGV point %d: max bw %d required %d\n",
			    i, max_data_rate, data_rate);
	}

	/*
	 * BSpec states that we always should have at least one allowed point
	 * left, so if we couldn't find any, simply reject the configuration
	 * for obvious reasons.
	 */
	if (allowed_points == 0) {
		drm_dbg_kms(&dev_priv->drm, "No QGV points provide sufficient memory"
			    " bandwidth %d for display configuration(%d active planes).\n",
			    data_rate, num_active_planes);
		return -EINVAL;
	}

	/*
	 * Leave only a single point with the highest bandwidth if
	 * we can't enable SAGV due to the increased memory latency it may
	 * cause.
	 */
	if (!intel_can_enable_sagv(dev_priv, new_bw_state)) {
		allowed_points = BIT(max_bw_point);
		drm_dbg_kms(&dev_priv->drm, "No SAGV, using single QGV point %d\n",
			    max_bw_point);
	}

	/*
	 * We store the ones which need to be masked as that is what PCode
	 * actually accepts as a parameter.
	 */
	new_bw_state->qgv_points_mask = ~allowed_points & mask;

	old_bw_state = intel_atomic_get_old_bw_state(state);
	/*
	 * If the actual mask had changed we need to make sure that
	 * the commits are serialized (in case this is a nomodeset,
	 * nonblocking commit).
	 */
	if (new_bw_state->qgv_points_mask != old_bw_state->qgv_points_mask) {
		ret = intel_atomic_serialize_global_state(&new_bw_state->base);
		if (ret)
			return ret;
	}

	return 0;
}
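/*
 * The resulting qgv_points_mask has a bit set for every QGV point that must
 * be hidden from pcode; it is presumably applied at commit time via
 * icl_pcode_restrict_qgv_points() above.
 */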
static struct intel_global_state *
intel_bw_duplicate_state(struct intel_global_obj *obj)
{
	struct intel_bw_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	return &state->base;
}
static void intel_bw_destroy_state(struct intel_global_obj *obj,
				   struct intel_global_state *state)
{
	kfree(state);
}
static const struct intel_global_state_funcs intel_bw_funcs = {
	.atomic_duplicate_state = intel_bw_duplicate_state,
	.atomic_destroy_state = intel_bw_destroy_state,
};
int intel_bw_init(struct drm_i915_private *dev_priv)
{
	struct intel_bw_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	intel_atomic_global_obj_init(dev_priv, &dev_priv->bw_obj,
				     &state->base, &intel_bw_funcs);

	return 0;
}