drivers/gpu/drm/i915/display/intel_bw.c
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <drm/drm_atomic_state_helper.h>

#include "intel_bw.h"
#include "intel_display_types.h"
#include "intel_sideband.h"
/* Parameters for Qclk Geyserville (QGV) */
struct intel_qgv_point {
	u16 dclk, t_rp, t_rdpre, t_rc, t_ras, t_rcd;
};

struct intel_qgv_info {
	struct intel_qgv_point points[I915_NUM_QGV_POINTS];
	u8 num_points;
	u8 num_channels;
	u8 t_bl;
	enum intel_dram_type dram_type;
};
static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv,
					   struct intel_qgv_info *qi)
{
	u32 val = 0;
	int ret;

	ret = sandybridge_pcode_read(dev_priv,
				     ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
				     ICL_PCODE_MEM_SS_READ_GLOBAL_INFO,
				     &val, NULL);
	if (ret)
		return ret;

	if (IS_GEN(dev_priv, 12)) {
		switch (val & 0xf) {
		case 0:
			qi->dram_type = INTEL_DRAM_DDR4;
			break;
		case 3:
			qi->dram_type = INTEL_DRAM_LPDDR4;
			break;
		case 4:
			qi->dram_type = INTEL_DRAM_DDR3;
			break;
		case 5:
			qi->dram_type = INTEL_DRAM_LPDDR3;
			break;
		default:
			MISSING_CASE(val & 0xf);
			break;
		}
	} else if (IS_GEN(dev_priv, 11)) {
		switch (val & 0xf) {
		case 0:
			qi->dram_type = INTEL_DRAM_DDR4;
			break;
		case 1:
			qi->dram_type = INTEL_DRAM_DDR3;
			break;
		case 2:
			qi->dram_type = INTEL_DRAM_LPDDR3;
			break;
		case 3:
			qi->dram_type = INTEL_DRAM_LPDDR4;
			break;
		default:
			MISSING_CASE(val & 0xf);
			break;
		}
	} else {
		MISSING_CASE(INTEL_GEN(dev_priv));
		qi->dram_type = INTEL_DRAM_LPDDR3; /* Conservative default */
	}

	qi->num_channels = (val & 0xf0) >> 4;
	qi->num_points = (val & 0xf00) >> 8;

	if (IS_GEN(dev_priv, 12))
		qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 4 : 16;
	else if (IS_GEN(dev_priv, 11))
		qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 4 : 8;

	return 0;
}
static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
					  struct intel_qgv_point *sp,
					  int point)
{
	u32 val = 0, val2 = 0;
	int ret;

	ret = sandybridge_pcode_read(dev_priv,
				     ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
				     ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point),
				     &val, &val2);
	if (ret)
		return ret;

	sp->dclk = val & 0xffff;
	sp->t_rp = (val & 0xff0000) >> 16;
	sp->t_rcd = (val & 0xff000000) >> 24;

	sp->t_rdpre = val2 & 0xff;
	sp->t_ras = (val2 & 0xff00) >> 8;

	sp->t_rc = sp->t_rp + sp->t_ras;

	return 0;
}
static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
			      struct intel_qgv_info *qi)
{
	int i, ret;

	ret = icl_pcode_read_mem_global_info(dev_priv, qi);
	if (ret)
		return ret;

	if (WARN_ON(qi->num_points > ARRAY_SIZE(qi->points)))
		qi->num_points = ARRAY_SIZE(qi->points);

	for (i = 0; i < qi->num_points; i++) {
		struct intel_qgv_point *sp = &qi->points[i];

		ret = icl_pcode_read_qgv_point_info(dev_priv, sp, i);
		if (ret)
			return ret;

		DRM_DEBUG_KMS("QGV %d: DCLK=%d tRP=%d tRDPRE=%d tRAS=%d tRCD=%d tRC=%d\n",
			      i, sp->dclk, sp->t_rp, sp->t_rdpre, sp->t_ras,
			      sp->t_rcd, sp->t_rc);
	}

	return 0;
}
static int icl_calc_bw(int dclk, int num, int den)
{
	/* multiples of 16.666MHz (100/6) */
	return DIV_ROUND_CLOSEST(num * dclk * 100, den * 6);
}
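
/*
 * Illustrative sketch, not part of the original file: for a hypothetical
 * QGV point with dclk = 1200 (in the 16.666 MHz units noted above), a call
 * such as icl_calc_bw(1200, 16, 1) evaluates to
 * DIV_ROUND_CLOSEST(16 * 1200 * 100, 1 * 6) = 320000, i.e. 16 transfers per
 * dclk cycle scaled by the 100/6 factor.
 */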
static int icl_sagv_max_dclk(const struct intel_qgv_info *qi)
{
	u16 dclk = 0;
	int i;

	for (i = 0; i < qi->num_points; i++)
		dclk = max(dclk, qi->points[i].dclk);

	return dclk;
}
struct intel_sa_info {
	u16 displayrtids;
	u8 deburst, deprogbwlimit;
};

static const struct intel_sa_info icl_sa_info = {
	.deburst = 8,
	.deprogbwlimit = 25, /* GB/s */
	.displayrtids = 128,
};

static const struct intel_sa_info tgl_sa_info = {
	.deburst = 16,
	.deprogbwlimit = 34, /* GB/s */
	.displayrtids = 256,
};
static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
{
	struct intel_qgv_info qi = {};
	bool is_y_tile = true; /* assume y tile may be used */
	int num_channels;
	int deinterleave;
	int ipqdepth, ipqdepthpch;
	int dclk_max;
	int maxdebw;
	int i, ret;

	ret = icl_get_qgv_points(dev_priv, &qi);
	if (ret) {
		DRM_DEBUG_KMS("Failed to get memory subsystem information, ignoring bandwidth limits");
		return ret;
	}

	num_channels = qi.num_channels;

	deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);
	dclk_max = icl_sagv_max_dclk(&qi);

	ipqdepthpch = 16;

	maxdebw = min(sa->deprogbwlimit * 1000,
		      icl_calc_bw(dclk_max, 16, 1) * 6 / 10); /* 60% */
	ipqdepth = min(ipqdepthpch, sa->displayrtids / num_channels);

	for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) {
		struct intel_bw_info *bi = &dev_priv->max_bw[i];
		int clpchgroup;
		int j;

		clpchgroup = (sa->deburst * deinterleave / num_channels) << i;
		bi->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1;

		bi->num_qgv_points = qi.num_points;

		for (j = 0; j < qi.num_points; j++) {
			const struct intel_qgv_point *sp = &qi.points[j];
			int ct, bw;

			/*
			 * Max row cycle time
			 *
			 * FIXME what is the logic behind the
			 * assumed burst length?
			 */
			ct = max_t(int, sp->t_rc, sp->t_rp + sp->t_rcd +
				   (clpchgroup - 1) * qi.t_bl + sp->t_rdpre);
			bw = icl_calc_bw(sp->dclk, clpchgroup * 32 * num_channels, ct);

			bi->deratedbw[j] = min(maxdebw,
					       bw * 9 / 10); /* 90% */

			DRM_DEBUG_KMS("BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
				      i, j, bi->num_planes, bi->deratedbw[j]);
		}

		if (bi->num_planes == 1)
			break;
	}

	return 0;
}
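
/*
 * Illustrative sketch, not part of the original file: for a hypothetical ICL
 * system with two memory channels (icl_sa_info: deburst = 8,
 * displayrtids = 128) and is_y_tile, deinterleave = DIV_ROUND_UP(2, 4) = 1
 * and ipqdepth = min(16, 128 / 2) = 16, so the successive BW groups get
 * clpchgroup = 4, 8, 16 and num_planes = (16 - 4) / 4 + 1 = 4, then 2,
 * then 1, at which point the loop above stops.
 */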
static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
			       int num_planes, int qgv_point)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) {
		const struct intel_bw_info *bi =
			&dev_priv->max_bw[i];

		/*
		 * Pcode will not expose all QGV points when
		 * SAGV is forced to off/min/med/max.
		 */
		if (qgv_point >= bi->num_qgv_points)
			return UINT_MAX;

		if (num_planes >= bi->num_planes)
			return bi->deratedbw[qgv_point];
	}

	return 0;
}
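
/*
 * Illustrative sketch, not part of the original file: continuing the
 * hypothetical two-channel example above (groups with num_planes = 4, 2, 1),
 * a lookup with num_planes = 3 skips group 0 (3 < 4) and returns the
 * deratedbw[] entry of group 1, the first group whose num_planes does not
 * exceed the requested plane count.
 */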
void intel_bw_init_hw(struct drm_i915_private *dev_priv)
{
	if (!HAS_DISPLAY(dev_priv))
		return;

	if (IS_GEN(dev_priv, 12))
		icl_get_bw_info(dev_priv, &tgl_sa_info);
	else if (IS_GEN(dev_priv, 11))
		icl_get_bw_info(dev_priv, &icl_sa_info);
}
static unsigned int intel_max_data_rate(struct drm_i915_private *dev_priv,
					int num_planes)
{
	if (INTEL_GEN(dev_priv) >= 11) {
		/*
		 * Any bw group has same amount of QGV points
		 */
		const struct intel_bw_info *bi =
			&dev_priv->max_bw[0];
		unsigned int min_bw = UINT_MAX;
		int i;

		/*
		 * FIXME with SAGV disabled maybe we can assume
		 * point 1 will always be used? Seems to match
		 * the behaviour observed in the wild.
		 */
		for (i = 0; i < bi->num_qgv_points; i++) {
			unsigned int bw = icl_max_bw(dev_priv, num_planes, i);

			min_bw = min(bw, min_bw);
		}
		return min_bw;
	} else {
		return UINT_MAX;
	}
}
static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_state *crtc_state)
{
	/*
	 * We assume cursors are small enough
	 * to not cause bandwidth problems.
	 */
	return hweight8(crtc_state->active_planes & ~BIT(PLANE_CURSOR));
}
static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	unsigned int data_rate = 0;
	enum plane_id plane_id;

	for_each_plane_id_on_crtc(crtc, plane_id) {
		/*
		 * We assume cursors are small enough
		 * to not cause bandwidth problems.
		 */
		if (plane_id == PLANE_CURSOR)
			continue;

		data_rate += crtc_state->data_rate[plane_id];
	}

	return data_rate;
}
void intel_bw_crtc_update(struct intel_bw_state *bw_state,
			  const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	bw_state->data_rate[crtc->pipe] =
		intel_bw_crtc_data_rate(crtc_state);
	bw_state->num_active_planes[crtc->pipe] =
		intel_bw_crtc_num_active_planes(crtc_state);

	DRM_DEBUG_KMS("pipe %c data rate %u num active planes %u\n",
		      pipe_name(crtc->pipe),
		      bw_state->data_rate[crtc->pipe],
		      bw_state->num_active_planes[crtc->pipe]);
}
static unsigned int intel_bw_num_active_planes(struct drm_i915_private *dev_priv,
					       const struct intel_bw_state *bw_state)
{
	unsigned int num_active_planes = 0;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		num_active_planes += bw_state->num_active_planes[pipe];

	return num_active_planes;
}
static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv,
				       const struct intel_bw_state *bw_state)
{
	unsigned int data_rate = 0;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		data_rate += bw_state->data_rate[pipe];

	return data_rate;
}
static struct intel_bw_state *
intel_atomic_get_bw_state(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct drm_private_state *bw_state;

	bw_state = drm_atomic_get_private_obj_state(&state->base,
						    &dev_priv->bw_obj);
	if (IS_ERR(bw_state))
		return ERR_CAST(bw_state);

	return to_intel_bw_state(bw_state);
}
int intel_bw_atomic_check(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_bw_state *bw_state = NULL;
	unsigned int data_rate, max_data_rate;
	unsigned int num_active_planes;
	struct intel_crtc *crtc;
	int i;

	/* FIXME earlier gens need some checks too */
	if (INTEL_GEN(dev_priv) < 11)
		return 0;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		unsigned int old_data_rate =
			intel_bw_crtc_data_rate(old_crtc_state);
		unsigned int new_data_rate =
			intel_bw_crtc_data_rate(new_crtc_state);
		unsigned int old_active_planes =
			intel_bw_crtc_num_active_planes(old_crtc_state);
		unsigned int new_active_planes =
			intel_bw_crtc_num_active_planes(new_crtc_state);

		/*
		 * Avoid locking the bw state when
		 * nothing significant has changed.
		 */
		if (old_data_rate == new_data_rate &&
		    old_active_planes == new_active_planes)
			continue;

		bw_state = intel_atomic_get_bw_state(state);
		if (IS_ERR(bw_state))
			return PTR_ERR(bw_state);

		bw_state->data_rate[crtc->pipe] = new_data_rate;
		bw_state->num_active_planes[crtc->pipe] = new_active_planes;

		DRM_DEBUG_KMS("pipe %c data rate %u num active planes %u\n",
			      pipe_name(crtc->pipe),
			      bw_state->data_rate[crtc->pipe],
			      bw_state->num_active_planes[crtc->pipe]);
	}

	if (!bw_state)
		return 0;

	data_rate = intel_bw_data_rate(dev_priv, bw_state);
	num_active_planes = intel_bw_num_active_planes(dev_priv, bw_state);

	max_data_rate = intel_max_data_rate(dev_priv, num_active_planes);

	data_rate = DIV_ROUND_UP(data_rate, 1000);

	if (data_rate > max_data_rate) {
		DRM_DEBUG_KMS("Bandwidth %u MB/s exceeds max available %d MB/s (%d active planes)\n",
			      data_rate, max_data_rate, num_active_planes);
		return -EINVAL;
	}

	return 0;
}
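
/*
 * Illustrative sketch, not part of the original file: if two pipes end up
 * with hypothetical totals of 4000000 and 2000000 in data_rate[], the sum of
 * 6000000 becomes DIV_ROUND_UP(6000000, 1000) = 6000 above, and -EINVAL is
 * returned only when that figure exceeds the derated limit from
 * intel_max_data_rate() (both values are printed as MB/s in the debug
 * message).
 */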
static struct drm_private_state *intel_bw_duplicate_state(struct drm_private_obj *obj)
{
	struct intel_bw_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}
static void intel_bw_destroy_state(struct drm_private_obj *obj,
				   struct drm_private_state *state)
{
	kfree(state);
}
static const struct drm_private_state_funcs intel_bw_funcs = {
	.atomic_duplicate_state = intel_bw_duplicate_state,
	.atomic_destroy_state = intel_bw_destroy_state,
};
int intel_bw_init(struct drm_i915_private *dev_priv)
{
	struct intel_bw_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	drm_atomic_private_obj_init(&dev_priv->drm, &dev_priv->bw_obj,
				    &state->base, &intel_bw_funcs);

	return 0;
}
void intel_bw_cleanup(struct drm_i915_private *dev_priv)
{
	drm_atomic_private_obj_fini(&dev_priv->bw_obj);
}