// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <drm/drm_atomic_state_helper.h>

#include "intel_bw.h"
#include "intel_display_types.h"
#include "intel_sideband.h"
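
/*
 * Display memory bandwidth checks for ICL+: query pcode for the QGV
 * (Qclk Geyserville) memory points, derive a derated bandwidth limit
 * per point, and reject atomic states whose aggregate plane data rate
 * would exceed that limit.
 */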

/* Parameters for Qclk Geyserville (QGV) */
struct intel_qgv_point {
	u16 dclk, t_rp, t_rdpre, t_rc, t_ras, t_rcd;
};

struct intel_qgv_info {
	struct intel_qgv_point points[I915_NUM_QGV_POINTS];
	u8 num_points;
	u8 num_channels;
	u8 t_bl;
	enum intel_dram_type dram_type;
};
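
/*
 * Layout of the READ_GLOBAL_INFO pcode reply, as decoded below:
 * bits 3:0 encode the DRAM type, bits 7:4 the number of channels,
 * and bits 11:8 the number of QGV points.
 */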
static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv,
					  struct intel_qgv_info *qi)
{
	u32 val = 0;
	int ret;

	ret = sandybridge_pcode_read(dev_priv,
				     ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
				     ICL_PCODE_MEM_SS_READ_GLOBAL_INFO,
				     &val, NULL);
	if (ret)
		return ret;

	if (IS_GEN(dev_priv, 12)) {
		switch (val & 0xf) {
		case 0:
			qi->dram_type = INTEL_DRAM_DDR4;
			break;
		case 3:
			qi->dram_type = INTEL_DRAM_LPDDR4;
			break;
		case 4:
			qi->dram_type = INTEL_DRAM_DDR3;
			break;
		case 5:
			qi->dram_type = INTEL_DRAM_LPDDR3;
			break;
		default:
			MISSING_CASE(val & 0xf);
			break;
		}
	} else if (IS_GEN(dev_priv, 11)) {
		switch (val & 0xf) {
		case 0:
			qi->dram_type = INTEL_DRAM_DDR4;
			break;
		case 1:
			qi->dram_type = INTEL_DRAM_DDR3;
			break;
		case 2:
			qi->dram_type = INTEL_DRAM_LPDDR3;
			break;
		case 3:
			qi->dram_type = INTEL_DRAM_LPDDR4;
			break;
		default:
			MISSING_CASE(val & 0xf);
			break;
		}
	} else {
		MISSING_CASE(INTEL_GEN(dev_priv));
		qi->dram_type = INTEL_DRAM_LPDDR3; /* Conservative default */
	}

	qi->num_channels = (val & 0xf0) >> 4;
	qi->num_points = (val & 0xf00) >> 8;

	if (IS_GEN(dev_priv, 12))
		qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 4 : 16;
	else if (IS_GEN(dev_priv, 11))
		qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 4 : 8;

	return 0;
}
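
/*
 * Layout of the READ_QGV_POINT_INFO reply, as decoded below: the first
 * dword carries DCLK (15:0, in multiples of 16.666 MHz), tRP (23:16)
 * and tRCD (31:24); the second carries tRDPRE (7:0) and tRAS (15:8).
 */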
static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
					 struct intel_qgv_point *sp,
					 int point)
{
	u32 val = 0, val2 = 0;
	int ret;

	ret = sandybridge_pcode_read(dev_priv,
				     ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
				     ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point),
				     &val, &val2);
	if (ret)
		return ret;

	sp->dclk = val & 0xffff;
	sp->t_rp = (val & 0xff0000) >> 16;
	sp->t_rcd = (val & 0xff000000) >> 24;

	sp->t_rdpre = val2 & 0xff;
	sp->t_ras = (val2 & 0xff00) >> 8;

	sp->t_rc = sp->t_rp + sp->t_ras;

	return 0;
}

static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
			      struct intel_qgv_info *qi)
{
	int i, ret;

	ret = icl_pcode_read_mem_global_info(dev_priv, qi);
	if (ret)
		return ret;

	if (WARN_ON(qi->num_points > ARRAY_SIZE(qi->points)))
		qi->num_points = ARRAY_SIZE(qi->points);

	for (i = 0; i < qi->num_points; i++) {
		struct intel_qgv_point *sp = &qi->points[i];

		ret = icl_pcode_read_qgv_point_info(dev_priv, sp, i);
		if (ret)
			return ret;

		DRM_DEBUG_KMS("QGV %d: DCLK=%d tRP=%d tRDPRE=%d tRAS=%d tRCD=%d tRC=%d\n",
			      i, sp->dclk, sp->t_rp, sp->t_rdpre, sp->t_ras,
			      sp->t_rcd, sp->t_rc);
	}

	return 0;
}

static int icl_calc_bw(int dclk, int num, int den)
{
	/* multiples of 16.666MHz (100/6) */
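	/*
	 * Illustrative example (hypothetical values): dclk = 72 means
	 * 72 * 100/6 = 1200 MHz, so icl_calc_bw(72, 16, 1) =
	 * 16 * 72 * 100 / 6 = 19200, i.e. 16 bytes/clock at 1.2 GHz
	 * gives 19200 MB/s.
	 */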
	return DIV_ROUND_CLOSEST(num * dclk * 100, den * 6);
}

static int icl_sagv_max_dclk(const struct intel_qgv_info *qi)
{
	u16 dclk = 0;
	int i;

	for (i = 0; i < qi->num_points; i++)
		dclk = max(dclk, qi->points[i].dclk);

	return dclk;
}
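
/*
 * Per-platform system agent tuning parameters; presumably these
 * reflect the display engine's burst size, its programmed bandwidth
 * limit and the number of display request IDs it can have
 * outstanding, though the exact hardware meaning is not spelled out
 * here.
 */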
struct intel_sa_info {
	u16 displayrtids;
	u8 deburst, deprogbwlimit;
};

static const struct intel_sa_info icl_sa_info = {
	.deburst = 8,
	.deprogbwlimit = 25, /* GB/s */
	.displayrtids = 128,
};

static const struct intel_sa_info tgl_sa_info = {
	.deburst = 16,
	.deprogbwlimit = 34, /* GB/s */
	.displayrtids = 256,
};
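
/*
 * Build the max_bw[] table: read the QGV points from pcode, then for
 * each bandwidth group compute how many planes it can sustain and the
 * derated (90%, capped at 60% of the peak) bandwidth available at
 * each QGV point.
 */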
static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
{
	struct intel_qgv_info qi = {};
	bool is_y_tile = true; /* assume y tile may be used */
	int num_channels;
	int deinterleave;
	int ipqdepth, ipqdepthpch;
	int dclk_max;
	int maxdebw;
	int i, ret;

	ret = icl_get_qgv_points(dev_priv, &qi);
	if (ret) {
		DRM_DEBUG_KMS("Failed to get memory subsystem information, ignoring bandwidth limits");
		return ret;
	}
	num_channels = qi.num_channels;

	deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);
	dclk_max = icl_sagv_max_dclk(&qi);

	ipqdepthpch = 16;

	maxdebw = min(sa->deprogbwlimit * 1000,
		      icl_calc_bw(dclk_max, 16, 1) * 6 / 10); /* 60% */
	ipqdepth = min(ipqdepthpch, sa->displayrtids / num_channels);
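
	/*
	 * Each successive group doubles the number of cachelines per
	 * channel group, which roughly halves the number of planes the
	 * queue depth can sustain; stop once a group is down to a
	 * single plane.
	 */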
	for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) {
		struct intel_bw_info *bi = &dev_priv->max_bw[i];
		int clpchgroup;
		int j;

		clpchgroup = (sa->deburst * deinterleave / num_channels) << i;
		bi->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1;

		bi->num_qgv_points = qi.num_points;

		for (j = 0; j < qi.num_points; j++) {
			const struct intel_qgv_point *sp = &qi.points[j];
			int ct, bw;

			/*
			 * Max row cycle time
			 *
			 * FIXME what is the logic behind the
			 * assumed burst length?
			 */
			ct = max_t(int, sp->t_rc, sp->t_rp + sp->t_rcd +
				   (clpchgroup - 1) * qi.t_bl + sp->t_rdpre);
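			/*
			 * ct is then the worst-case number of dclk
			 * cycles needed to transfer one group, so the
			 * raw bandwidth below works out to
			 * clpchgroup * 32 * num_channels bytes per ct
			 * cycles (the "32" presumably being bytes per
			 * cacheline fetch per channel).
			 */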
			bw = icl_calc_bw(sp->dclk, clpchgroup * 32 * num_channels, ct);

			bi->deratedbw[j] = min(maxdebw,
					       bw * 9 / 10); /* 90% */

			DRM_DEBUG_KMS("BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
				      i, j, bi->num_planes, bi->deratedbw[j]);
		}

		if (bi->num_planes == 1)
			break;
	}

	return 0;
}
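
/*
 * Look up the derated bandwidth for a given QGV point and plane count:
 * pick the first group able to handle num_planes. A point the group
 * does not expose yields UINT_MAX (no constraint); no matching group
 * yields 0.
 */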
static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
			       int num_planes, int qgv_point)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) {
		const struct intel_bw_info *bi =
			&dev_priv->max_bw[i];

		/*
		 * Pcode will not expose all QGV points when
		 * SAGV is forced to off/min/med/max.
		 */
		if (qgv_point >= bi->num_qgv_points)
			return UINT_MAX;

		if (num_planes >= bi->num_planes)
			return bi->deratedbw[qgv_point];
	}

	return 0;
}

void intel_bw_init_hw(struct drm_i915_private *dev_priv)
{
	if (!HAS_DISPLAY(dev_priv))
		return;

	if (IS_GEN(dev_priv, 12))
		icl_get_bw_info(dev_priv, &tgl_sa_info);
	else if (IS_GEN(dev_priv, 11))
		icl_get_bw_info(dev_priv, &icl_sa_info);
}
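
/*
 * The effective limit is the minimum across all QGV points, i.e. the
 * bandwidth the display can count on no matter which memory point the
 * system ends up running at.
 */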
static unsigned int intel_max_data_rate(struct drm_i915_private *dev_priv,
					int num_planes)
{
	if (INTEL_GEN(dev_priv) >= 11) {
		/*
		 * Any bw group has same amount of QGV points
		 */
		const struct intel_bw_info *bi =
			&dev_priv->max_bw[0];
		unsigned int min_bw = UINT_MAX;
		int i;

		/*
		 * FIXME with SAGV disabled maybe we can assume
		 * point 1 will always be used? Seems to match
		 * the behaviour observed in the wild.
		 */
		for (i = 0; i < bi->num_qgv_points; i++) {
			unsigned int bw = icl_max_bw(dev_priv, num_planes, i);

			min_bw = min(bw, min_bw);
		}
		return min_bw;
	} else {
		return UINT_MAX;
	}
}

static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_state *crtc_state)
{
	/*
	 * We assume cursors are small enough
	 * to not cause bandwidth problems.
	 */
	return hweight8(crtc_state->active_planes & ~BIT(PLANE_CURSOR));
}

static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	unsigned int data_rate = 0;
	enum plane_id plane_id;

	for_each_plane_id_on_crtc(crtc, plane_id) {
		/*
		 * We assume cursors are small enough
		 * to not cause bandwidth problems.
		 */
		if (plane_id == PLANE_CURSOR)
			continue;

		data_rate += crtc_state->data_rate[plane_id];
	}

	return data_rate;
}

void intel_bw_crtc_update(struct intel_bw_state *bw_state,
			  const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	bw_state->data_rate[crtc->pipe] =
		intel_bw_crtc_data_rate(crtc_state);
	bw_state->num_active_planes[crtc->pipe] =
		intel_bw_crtc_num_active_planes(crtc_state);

	DRM_DEBUG_KMS("pipe %c data rate %u num active planes %u\n",
		      pipe_name(crtc->pipe),
		      bw_state->data_rate[crtc->pipe],
		      bw_state->num_active_planes[crtc->pipe]);
}

static unsigned int intel_bw_num_active_planes(struct drm_i915_private *dev_priv,
					       const struct intel_bw_state *bw_state)
{
	unsigned int num_active_planes = 0;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		num_active_planes += bw_state->num_active_planes[pipe];

	return num_active_planes;
}

static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv,
				       const struct intel_bw_state *bw_state)
{
	unsigned int data_rate = 0;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		data_rate += bw_state->data_rate[pipe];

	return data_rate;
}

static struct intel_bw_state *
intel_atomic_get_bw_state(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct drm_private_state *bw_state;

	bw_state = drm_atomic_get_private_obj_state(&state->base,
						    &dev_priv->bw_obj);
	if (IS_ERR(bw_state))
		return ERR_CAST(bw_state);

	return to_intel_bw_state(bw_state);
}
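
/*
 * Reject any atomic state whose total plane data rate, summed across
 * all pipes, exceeds the derated memory bandwidth for the resulting
 * number of active planes. The bw private object is only grabbed (and
 * other CRTCs thus only serialized) when a CRTC's data rate or plane
 * count actually changes.
 */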
int intel_bw_atomic_check(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_bw_state *bw_state = NULL;
	unsigned int data_rate, max_data_rate;
	unsigned int num_active_planes;
	struct intel_crtc *crtc;
	int i;

	/* FIXME earlier gens need some checks too */
	if (INTEL_GEN(dev_priv) < 11)
		return 0;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		unsigned int old_data_rate =
			intel_bw_crtc_data_rate(old_crtc_state);
		unsigned int new_data_rate =
			intel_bw_crtc_data_rate(new_crtc_state);
		unsigned int old_active_planes =
			intel_bw_crtc_num_active_planes(old_crtc_state);
		unsigned int new_active_planes =
			intel_bw_crtc_num_active_planes(new_crtc_state);

		/*
		 * Avoid locking the bw state when
		 * nothing significant has changed.
		 */
		if (old_data_rate == new_data_rate &&
		    old_active_planes == new_active_planes)
			continue;

		bw_state = intel_atomic_get_bw_state(state);
		if (IS_ERR(bw_state))
			return PTR_ERR(bw_state);

		bw_state->data_rate[crtc->pipe] = new_data_rate;
		bw_state->num_active_planes[crtc->pipe] = new_active_planes;

		DRM_DEBUG_KMS("pipe %c data rate %u num active planes %u\n",
			      pipe_name(crtc->pipe),
			      bw_state->data_rate[crtc->pipe],
			      bw_state->num_active_planes[crtc->pipe]);
	}

	if (!bw_state)
		return 0;

	data_rate = intel_bw_data_rate(dev_priv, bw_state);
	num_active_planes = intel_bw_num_active_planes(dev_priv, bw_state);

	max_data_rate = intel_max_data_rate(dev_priv, num_active_planes);

	/* kB/s -> MB/s, to match the units of deratedbw */
	data_rate = DIV_ROUND_UP(data_rate, 1000);

	if (data_rate > max_data_rate) {
		DRM_DEBUG_KMS("Bandwidth %u MB/s exceeds max available %d MB/s (%d active planes)\n",
			      data_rate, max_data_rate, num_active_planes);
		return -EINVAL;
	}

	return 0;
}

static struct drm_private_state *intel_bw_duplicate_state(struct drm_private_obj *obj)
{
	struct intel_bw_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void intel_bw_destroy_state(struct drm_private_obj *obj,
				   struct drm_private_state *state)
{
	kfree(state);
}

static const struct drm_private_state_funcs intel_bw_funcs = {
	.atomic_duplicate_state = intel_bw_duplicate_state,
	.atomic_destroy_state = intel_bw_destroy_state,
};

int intel_bw_init(struct drm_i915_private *dev_priv)
{
	struct intel_bw_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	drm_atomic_private_obj_init(&dev_priv->drm, &dev_priv->bw_obj,
				    &state->base, &intel_bw_funcs);

	return 0;
}

void intel_bw_cleanup(struct drm_i915_private *dev_priv)
{
	drm_atomic_private_obj_fini(&dev_priv->bw_obj);
}