// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt)	"[drm:%s] " fmt, __func__
#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_pingpong.h"
#include "dpu_hw_intf.h"
#include "dpu_hw_dspp.h"
#include "dpu_hw_merge3d.h"
#include "dpu_encoder.h"
#include "dpu_trace.h"
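
/* Return true if the block at @idx in @res_map is held by an encoder other
 * than @enc_id.
 */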
static inline bool reserved_by_other(uint32_t *res_map, int idx,
				     uint32_t enc_id)
{
	return res_map[idx] && res_map[idx] != enc_id;
}

/**
 * struct dpu_rm_requirements - Reservation requirements parameter bundle
 * @topology: selected topology for the display
 * @hw_res: hardware resources required as reported by the encoders
 */
struct dpu_rm_requirements {
	struct msm_display_topology topology;
	struct dpu_encoder_hw_resources hw_res;
};
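
/**
 * dpu_rm_destroy - free all of the hw block objects created by dpu_rm_init()
 * @rm: dpu resource manager handle
 * Return: 0 on success
 */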
int dpu_rm_destroy(struct dpu_rm *rm)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(rm->pingpong_blks); i++) {
		struct dpu_hw_pingpong *hw;

		if (rm->pingpong_blks[i]) {
			hw = to_dpu_hw_pingpong(rm->pingpong_blks[i]);
			dpu_hw_pingpong_destroy(hw);
		}
	}
	for (i = 0; i < ARRAY_SIZE(rm->merge_3d_blks); i++) {
		struct dpu_hw_merge_3d *hw;

		if (rm->merge_3d_blks[i]) {
			hw = to_dpu_hw_merge_3d(rm->merge_3d_blks[i]);
			dpu_hw_merge_3d_destroy(hw);
		}
	}
	for (i = 0; i < ARRAY_SIZE(rm->mixer_blks); i++) {
		struct dpu_hw_mixer *hw;

		if (rm->mixer_blks[i]) {
			hw = to_dpu_hw_mixer(rm->mixer_blks[i]);
			dpu_hw_lm_destroy(hw);
		}
	}
	for (i = 0; i < ARRAY_SIZE(rm->ctl_blks); i++) {
		struct dpu_hw_ctl *hw;

		if (rm->ctl_blks[i]) {
			hw = to_dpu_hw_ctl(rm->ctl_blks[i]);
			dpu_hw_ctl_destroy(hw);
		}
	}
	for (i = 0; i < ARRAY_SIZE(rm->intf_blks); i++) {
		struct dpu_hw_intf *hw;

		if (rm->intf_blks[i]) {
			hw = to_dpu_hw_intf(rm->intf_blks[i]);
			dpu_hw_intf_destroy(hw);
		}
	}
	/* dspp blocks are also created by dpu_rm_init(), tear them down too */
	for (i = 0; i < ARRAY_SIZE(rm->dspp_blks); i++) {
		struct dpu_hw_dspp *hw;

		if (rm->dspp_blks[i]) {
			hw = to_dpu_hw_dspp(rm->dspp_blks[i]);
			dpu_hw_dspp_destroy(hw);
		}
	}

	return 0;
}
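
/**
 * dpu_rm_init - interrogate the hw catalog and create a tracking object for
 *	each hw block listed there
 * @rm: dpu resource manager handle
 * @cat: dpu hardware catalog for this SoC
 * @mmio: mapped mdp register space
 * Return: 0 on success, error code on failure; on failure all blocks created
 *	so far are destroyed again via dpu_rm_destroy()
 */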
int dpu_rm_init(struct dpu_rm *rm,
		struct dpu_mdss_cfg *cat,
		void __iomem *mmio)
{
	int rc, i;

	if (!rm || !cat || !mmio) {
		DPU_ERROR("invalid arguments\n");
		return -EINVAL;
	}

	/* Clear, setup lists */
	memset(rm, 0, sizeof(*rm));

	/* Interrogate HW catalog and create tracking items for hw blocks */
	for (i = 0; i < cat->mixer_count; i++) {
		struct dpu_hw_mixer *hw;
		const struct dpu_lm_cfg *lm = &cat->mixer[i];

		if (lm->pingpong == PINGPONG_MAX) {
			DPU_DEBUG("skip mixer %d without pingpong\n", lm->id);
			continue;
		}

		if (lm->id < LM_0 || lm->id >= LM_MAX) {
			DPU_ERROR("skip mixer %d with invalid id\n", lm->id);
			continue;
		}
		hw = dpu_hw_lm_init(lm->id, mmio, cat);
		if (IS_ERR_OR_NULL(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed lm object creation: err %d\n", rc);
			goto fail;
		}
		rm->mixer_blks[lm->id - LM_0] = &hw->base;

		if (!rm->lm_max_width) {
			rm->lm_max_width = lm->sblk->maxwidth;
		} else if (rm->lm_max_width != lm->sblk->maxwidth) {
			/*
			 * Don't expect to have hw where lm max widths differ.
			 * If found, take the min.
			 */
			DPU_ERROR("unsupported: lm maxwidth differs\n");
			if (rm->lm_max_width > lm->sblk->maxwidth)
				rm->lm_max_width = lm->sblk->maxwidth;
		}
	}

	for (i = 0; i < cat->merge_3d_count; i++) {
		struct dpu_hw_merge_3d *hw;
		const struct dpu_merge_3d_cfg *merge_3d = &cat->merge_3d[i];

		if (merge_3d->id < MERGE_3D_0 || merge_3d->id >= MERGE_3D_MAX) {
			DPU_ERROR("skip merge_3d %d with invalid id\n",
				  merge_3d->id);
			continue;
		}
		hw = dpu_hw_merge_3d_init(merge_3d->id, mmio, cat);
		if (IS_ERR_OR_NULL(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed merge_3d object creation: err %d\n",
				rc);
			goto fail;
		}
		rm->merge_3d_blks[merge_3d->id - MERGE_3D_0] = &hw->base;
	}

	for (i = 0; i < cat->pingpong_count; i++) {
		struct dpu_hw_pingpong *hw;
		const struct dpu_pingpong_cfg *pp = &cat->pingpong[i];

		if (pp->id < PINGPONG_0 || pp->id >= PINGPONG_MAX) {
			DPU_ERROR("skip pingpong %d with invalid id\n", pp->id);
			continue;
		}
		hw = dpu_hw_pingpong_init(pp->id, mmio, cat);
		if (IS_ERR_OR_NULL(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed pingpong object creation: err %d\n",
				rc);
			goto fail;
		}
		if (pp->merge_3d && pp->merge_3d < MERGE_3D_MAX)
			hw->merge_3d = rm->merge_3d_blks[pp->merge_3d - MERGE_3D_0];
		rm->pingpong_blks[pp->id - PINGPONG_0] = &hw->base;
	}

	for (i = 0; i < cat->intf_count; i++) {
		struct dpu_hw_intf *hw;
		const struct dpu_intf_cfg *intf = &cat->intf[i];

		if (intf->type == INTF_NONE) {
			DPU_DEBUG("skip intf %d with type none\n", i);
			continue;
		}
		if (intf->id < INTF_0 || intf->id >= INTF_MAX) {
			DPU_ERROR("skip intf %d with invalid id\n", intf->id);
			continue;
		}
		hw = dpu_hw_intf_init(intf->id, mmio, cat);
		if (IS_ERR_OR_NULL(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed intf object creation: err %d\n", rc);
			goto fail;
		}
		rm->intf_blks[intf->id - INTF_0] = &hw->base;
	}

	for (i = 0; i < cat->ctl_count; i++) {
		struct dpu_hw_ctl *hw;
		const struct dpu_ctl_cfg *ctl = &cat->ctl[i];

		if (ctl->id < CTL_0 || ctl->id >= CTL_MAX) {
			DPU_ERROR("skip ctl %d with invalid id\n", ctl->id);
			continue;
		}
		hw = dpu_hw_ctl_init(ctl->id, mmio, cat);
		if (IS_ERR_OR_NULL(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed ctl object creation: err %d\n", rc);
			goto fail;
		}
		rm->ctl_blks[ctl->id - CTL_0] = &hw->base;
	}

	for (i = 0; i < cat->dspp_count; i++) {
		struct dpu_hw_dspp *hw;
		const struct dpu_dspp_cfg *dspp = &cat->dspp[i];

		if (dspp->id < DSPP_0 || dspp->id >= DSPP_MAX) {
			DPU_ERROR("skip dspp %d with invalid id\n", dspp->id);
			continue;
		}
		hw = dpu_hw_dspp_init(dspp->id, mmio, cat);
		if (IS_ERR_OR_NULL(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed dspp object creation: err %d\n", rc);
			goto fail;
		}
		rm->dspp_blks[dspp->id - DSPP_0] = &hw->base;
	}

	return 0;

fail:
	dpu_rm_destroy(rm);

	return rc ? rc : -EFAULT;
}
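
/* Topologies that drive more than one interface require split display */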
static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
{
	return top->num_intf > 1;
}

/**
 * _dpu_rm_check_lm_peer - check if a mixer is a peer of the primary
 * @rm: dpu resource manager handle
 * @primary_idx: index of primary mixer in rm->mixer_blks[]
 * @peer_idx: index of other mixer in rm->mixer_blks[]
 * Return: true if rm->mixer_blks[peer_idx] is a peer of
 *	rm->mixer_blks[primary_idx]
 */
static bool _dpu_rm_check_lm_peer(struct dpu_rm *rm, int primary_idx,
		int peer_idx)
{
	const struct dpu_lm_cfg *prim_lm_cfg;
	const struct dpu_lm_cfg *peer_cfg;

	prim_lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[primary_idx])->cap;
	peer_cfg = to_dpu_hw_mixer(rm->mixer_blks[peer_idx])->cap;

	if (!test_bit(peer_cfg->id, &prim_lm_cfg->lm_pair_mask)) {
		DPU_DEBUG("lm %d not peer of lm %d\n", peer_cfg->id,
				prim_lm_cfg->id);
		return false;
	}

	return true;
}

/**
 * _dpu_rm_check_lm_and_get_connected_blks - check if the proposed layer mixer
 *	meets the proposed use case requirements, incl. hardwired dependent
 *	blocks like pingpong
 * @rm: dpu resource manager handle
 * @global_state: resources shared across multiple kms objects
 * @enc_id: encoder id requesting the allocation
 * @lm_idx: index of proposed layer mixer in rm->mixer_blks[], function checks
 *	if the lm, and all other hardwired blocks connected to the lm (pp),
 *	are available and appropriate
 * @pp_idx: output parameter, index of pingpong block attached to the layer
 *	mixer in rm->pingpong_blks[].
 * @dspp_idx: output parameter, index of dspp block attached to the layer
 *	mixer in rm->dspp_blks[].
 * @reqs: input parameter, rm requirements for HW blocks needed in the
 *	datapath.
 * Return: true if lm matches all requirements, false otherwise
 */
static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		uint32_t enc_id, int lm_idx, int *pp_idx, int *dspp_idx,
		struct dpu_rm_requirements *reqs)
{
	const struct dpu_lm_cfg *lm_cfg;
	int idx;

	/* Already reserved? */
	if (reserved_by_other(global_state->mixer_to_enc_id, lm_idx, enc_id)) {
		DPU_DEBUG("lm %d already reserved\n", lm_idx + LM_0);
		return false;
	}

	lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[lm_idx])->cap;
	idx = lm_cfg->pingpong - PINGPONG_0;
	if (idx < 0 || idx >= ARRAY_SIZE(rm->pingpong_blks)) {
		DPU_ERROR("failed to get pp on lm %d\n", lm_cfg->pingpong);
		return false;
	}

	if (reserved_by_other(global_state->pingpong_to_enc_id, idx, enc_id)) {
		DPU_DEBUG("lm %d pp %d already reserved\n", lm_cfg->id,
				lm_cfg->pingpong);
		return false;
	}
	*pp_idx = idx;

	if (!reqs->topology.num_dspp)
		return true;

	idx = lm_cfg->dspp - DSPP_0;
	if (idx < 0 || idx >= ARRAY_SIZE(rm->dspp_blks)) {
		DPU_ERROR("failed to get dspp on lm %d\n", lm_cfg->dspp);
		return false;
	}

	if (reserved_by_other(global_state->dspp_to_enc_id, idx, enc_id)) {
		DPU_DEBUG("lm %d dspp %d already reserved\n", lm_cfg->id,
				lm_cfg->dspp);
		return false;
	}
	*dspp_idx = idx;

	return true;
}
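
/**
 * _dpu_rm_reserve_lms - reserve the layer mixers required by the topology
 * @rm: dpu resource manager handle
 * @global_state: resources shared across multiple kms objects
 * @enc_id: encoder id requesting the allocation
 * @reqs: rm requirements describing the needed topology
 *
 * Scans rm->mixer_blks[] for a free primary mixer, then for enough free peer
 * mixers to satisfy the topology. On success, the chosen mixers and their
 * attached pingpong (and, if requested, dspp) blocks are marked in
 * @global_state as owned by @enc_id.
 * Return: 0 on success, -EINVAL or -ENAVAIL on failure
 */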
static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
			       struct dpu_global_state *global_state,
			       uint32_t enc_id,
			       struct dpu_rm_requirements *reqs)

{
	int lm_idx[MAX_BLOCKS];
	int pp_idx[MAX_BLOCKS];
	int dspp_idx[MAX_BLOCKS] = {0};
	int i, j, lm_count = 0;

	if (!reqs->topology.num_lm) {
		DPU_ERROR("invalid number of lm: %d\n", reqs->topology.num_lm);
		return -EINVAL;
	}

	/* Find a primary mixer */
	for (i = 0; i < ARRAY_SIZE(rm->mixer_blks) &&
			lm_count < reqs->topology.num_lm; i++) {
		if (!rm->mixer_blks[i])
			continue;

		lm_count = 0;
		lm_idx[lm_count] = i;

		if (!_dpu_rm_check_lm_and_get_connected_blks(rm, global_state,
				enc_id, i, &pp_idx[lm_count],
				&dspp_idx[lm_count], reqs)) {
			continue;
		}

		++lm_count;

		/* Valid primary mixer found, find matching peers */
		for (j = i + 1; j < ARRAY_SIZE(rm->mixer_blks) &&
				lm_count < reqs->topology.num_lm; j++) {
			if (!rm->mixer_blks[j])
				continue;

			if (!_dpu_rm_check_lm_peer(rm, i, j)) {
				DPU_DEBUG("lm %d not peer of lm %d\n", LM_0 + j,
						LM_0 + i);
				continue;
			}

			if (!_dpu_rm_check_lm_and_get_connected_blks(rm,
					global_state, enc_id, j,
					&pp_idx[lm_count], &dspp_idx[lm_count],
					reqs)) {
				continue;
			}

			lm_idx[lm_count] = j;
			++lm_count;
		}
	}

	if (lm_count != reqs->topology.num_lm) {
		DPU_DEBUG("unable to find appropriate mixers\n");
		return -ENAVAIL;
	}

	for (i = 0; i < lm_count; i++) {
		global_state->mixer_to_enc_id[lm_idx[i]] = enc_id;
		global_state->pingpong_to_enc_id[pp_idx[i]] = enc_id;
		global_state->dspp_to_enc_id[dspp_idx[i]] =
			reqs->topology.num_dspp ? enc_id : 0;

		trace_dpu_rm_reserve_lms(lm_idx[i] + LM_0, enc_id,
					 pp_idx[i] + PINGPONG_0);
	}

	return 0;
}
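
/**
 * _dpu_rm_reserve_ctls - reserve one CTL block per requested interface
 * @rm: dpu resource manager handle
 * @global_state: resources shared across multiple kms objects
 * @enc_id: encoder id requesting the allocation
 * @top: selected display topology
 *
 * Only CTLs whose DPU_CTL_SPLIT_DISPLAY capability matches the topology's
 * split-display requirement are considered.
 * Return: 0 on success, -ENAVAIL if not enough matching CTLs are free
 */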
static int _dpu_rm_reserve_ctls(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		uint32_t enc_id,
		const struct msm_display_topology *top)
{
	int ctl_idx[MAX_BLOCKS];
	int i = 0, j, num_ctls;
	bool needs_split_display;

	/* each hw_intf needs its own hw_ctl to program its control path */
	num_ctls = top->num_intf;

	needs_split_display = _dpu_rm_needs_split_display(top);

	for (j = 0; j < ARRAY_SIZE(rm->ctl_blks); j++) {
		const struct dpu_hw_ctl *ctl;
		unsigned long features;
		bool has_split_display;

		if (!rm->ctl_blks[j])
			continue;
		if (reserved_by_other(global_state->ctl_to_enc_id, j, enc_id))
			continue;

		ctl = to_dpu_hw_ctl(rm->ctl_blks[j]);
		features = ctl->caps->features;
		has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features;

		DPU_DEBUG("ctl %d caps 0x%lX\n", rm->ctl_blks[j]->id, features);

		if (needs_split_display != has_split_display)
			continue;

		ctl_idx[i] = j;
		DPU_DEBUG("ctl %d match\n", j + CTL_0);

		if (++i == num_ctls)
			break;
	}

	if (i != num_ctls)
		return -ENAVAIL;

	for (i = 0; i < ARRAY_SIZE(ctl_idx) && i < num_ctls; i++) {
		global_state->ctl_to_enc_id[ctl_idx[i]] = enc_id;

		trace_dpu_rm_reserve_ctls(ctl_idx[i] + CTL_0, enc_id);
	}

	return 0;
}
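
/**
 * _dpu_rm_reserve_intf - mark a single INTF block as owned by the encoder
 * @rm: dpu resource manager handle
 * @global_state: resources shared across multiple kms objects
 * @enc_id: encoder id requesting the allocation
 * @id: hw INTF id (INTF_0 based)
 * Return: 0 on success, -EINVAL for an unknown INTF, -ENAVAIL if it is
 *	already reserved by another encoder
 */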
static int _dpu_rm_reserve_intf(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		uint32_t enc_id,
		uint32_t id)
{
	int idx = id - INTF_0;

	if (idx < 0 || idx >= ARRAY_SIZE(rm->intf_blks)) {
		DPU_ERROR("invalid intf id: %d\n", id);
		return -EINVAL;
	}

	if (!rm->intf_blks[idx]) {
		DPU_ERROR("couldn't find intf id %d\n", id);
		return -EINVAL;
	}

	if (reserved_by_other(global_state->intf_to_enc_id, idx, enc_id)) {
		DPU_ERROR("intf id %d already reserved\n", id);
		return -ENAVAIL;
	}

	global_state->intf_to_enc_id[idx] = enc_id;

	return 0;
}
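
/**
 * _dpu_rm_reserve_intf_related_hw - reserve every INTF the encoder reports
 *	needing in its hw resource table
 * @rm: dpu resource manager handle
 * @global_state: resources shared across multiple kms objects
 * @enc_id: encoder id requesting the allocation
 * @hw_res: hw resources as reported by the encoder
 * Return: 0 on success, or the first error from _dpu_rm_reserve_intf()
 */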
static int _dpu_rm_reserve_intf_related_hw(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		uint32_t enc_id,
		struct dpu_encoder_hw_resources *hw_res)
{
	int i, ret = 0;
	u32 id;

	for (i = 0; i < ARRAY_SIZE(hw_res->intfs); i++) {
		if (hw_res->intfs[i] == INTF_MODE_NONE)
			continue;
		id = i + INTF_0;
		ret = _dpu_rm_reserve_intf(rm, global_state, enc_id, id);
		if (ret)
			return ret;
	}

	return ret;
}
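
/**
 * _dpu_rm_make_reservation - reserve mixers, CTLs and INTFs for an encoder
 * @rm: dpu resource manager handle
 * @global_state: resources shared across multiple kms objects
 * @enc: encoder requesting the reservation
 * @reqs: previously populated reservation requirements
 * Return: 0 on success, error code from the first failed reservation step
 */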
static int _dpu_rm_make_reservation(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		struct drm_encoder *enc,
		struct dpu_rm_requirements *reqs)
{
	int ret;

	ret = _dpu_rm_reserve_lms(rm, global_state, enc->base.id, reqs);
	if (ret) {
		DPU_ERROR("unable to find appropriate mixers\n");
		return ret;
	}

	ret = _dpu_rm_reserve_ctls(rm, global_state, enc->base.id,
				&reqs->topology);
	if (ret) {
		DPU_ERROR("unable to find appropriate CTL\n");
		return ret;
	}

	ret = _dpu_rm_reserve_intf_related_hw(rm, global_state, enc->base.id,
				&reqs->hw_res);
	if (ret)
		return ret;

	return ret;
}
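
/*
 * Bundle the encoder's reported hw resource needs together with the
 * requested topology so the reservation helpers can consume them.
 */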
static int _dpu_rm_populate_requirements(
		struct drm_encoder *enc,
		struct dpu_rm_requirements *reqs,
		struct msm_display_topology req_topology)
{
	dpu_encoder_get_hw_resources(enc, &reqs->hw_res);

	reqs->topology = req_topology;

	DRM_DEBUG_KMS("num_lm: %d num_enc: %d num_intf: %d\n",
		      reqs->topology.num_lm, reqs->topology.num_enc,
		      reqs->topology.num_intf);

	return 0;
}
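
/* Clear every entry in @res_mapping that is currently held by @enc_id */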
static void _dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt,
				  uint32_t enc_id)
{
	int i;

	for (i = 0; i < cnt; i++) {
		if (res_mapping[i] == enc_id)
			res_mapping[i] = 0;
	}
}
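
/**
 * dpu_rm_release - release all hw blocks reserved for the given encoder
 * @global_state: resources shared across multiple kms objects
 * @enc: encoder whose reservations should be cleared
 */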
void dpu_rm_release(struct dpu_global_state *global_state,
		    struct drm_encoder *enc)
{
	_dpu_rm_clear_mapping(global_state->pingpong_to_enc_id,
		ARRAY_SIZE(global_state->pingpong_to_enc_id), enc->base.id);
	_dpu_rm_clear_mapping(global_state->mixer_to_enc_id,
		ARRAY_SIZE(global_state->mixer_to_enc_id), enc->base.id);
	_dpu_rm_clear_mapping(global_state->ctl_to_enc_id,
		ARRAY_SIZE(global_state->ctl_to_enc_id), enc->base.id);
	_dpu_rm_clear_mapping(global_state->intf_to_enc_id,
		ARRAY_SIZE(global_state->intf_to_enc_id), enc->base.id);
	/* dspp entries are also set during reservation and must be dropped */
	_dpu_rm_clear_mapping(global_state->dspp_to_enc_id,
		ARRAY_SIZE(global_state->dspp_to_enc_id), enc->base.id);
}
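
/**
 * dpu_rm_reserve - reserve the hw blocks needed for the given topology
 * @rm: dpu resource manager handle
 * @global_state: resources shared across multiple kms objects
 * @enc: encoder requesting the reservation
 * @crtc_state: crtc state being checked; a reservation is only made when the
 *	state requires a full modeset
 * @topology: requested display topology
 * Return: 0 on success (or for a plain page-flip), error code on failure
 */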
int dpu_rm_reserve(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		struct drm_encoder *enc,
		struct drm_crtc_state *crtc_state,
		struct msm_display_topology topology)
{
	struct dpu_rm_requirements reqs;
	int ret;

	/* Check if this is just a page-flip */
	if (!drm_atomic_crtc_needs_modeset(crtc_state))
		return 0;

	if (IS_ERR(global_state)) {
		DPU_ERROR("failed to get global state\n");
		return PTR_ERR(global_state);
	}

	DRM_DEBUG_KMS("reserving hw for enc %d crtc %d\n",
		      enc->base.id, crtc_state->crtc->base.id);

	ret = _dpu_rm_populate_requirements(enc, &reqs, topology);
	if (ret) {
		DPU_ERROR("failed to populate hw requirements\n");
		return ret;
	}

	ret = _dpu_rm_make_reservation(rm, global_state, enc, &reqs);
	if (ret)
		DPU_ERROR("failed to reserve hw resources: %d\n", ret);

	return ret;
}
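
/**
 * dpu_rm_get_assigned_resources - return the hw blocks of the given type that
 *	are currently assigned to the given encoder
 * @rm: dpu resource manager handle
 * @global_state: resources shared across multiple kms objects
 * @enc_id: encoder id to look up
 * @type: hw block type of interest
 * @blks: output array of hw block pointers
 * @blks_size: capacity of @blks
 * Return: number of blocks written to @blks
 */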
int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
	struct dpu_global_state *global_state, uint32_t enc_id,
	enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size)
{
	struct dpu_hw_blk **hw_blks;
	uint32_t *hw_to_enc_id;
	int i, num_blks, max_blks;

	switch (type) {
	case DPU_HW_BLK_PINGPONG:
		hw_blks = rm->pingpong_blks;
		hw_to_enc_id = global_state->pingpong_to_enc_id;
		max_blks = ARRAY_SIZE(rm->pingpong_blks);
		break;
	case DPU_HW_BLK_LM:
		hw_blks = rm->mixer_blks;
		hw_to_enc_id = global_state->mixer_to_enc_id;
		max_blks = ARRAY_SIZE(rm->mixer_blks);
		break;
	case DPU_HW_BLK_CTL:
		hw_blks = rm->ctl_blks;
		hw_to_enc_id = global_state->ctl_to_enc_id;
		max_blks = ARRAY_SIZE(rm->ctl_blks);
		break;
	case DPU_HW_BLK_INTF:
		hw_blks = rm->intf_blks;
		hw_to_enc_id = global_state->intf_to_enc_id;
		max_blks = ARRAY_SIZE(rm->intf_blks);
		break;
	case DPU_HW_BLK_DSPP:
		hw_blks = rm->dspp_blks;
		hw_to_enc_id = global_state->dspp_to_enc_id;
		max_blks = ARRAY_SIZE(rm->dspp_blks);
		break;
	default:
		DPU_ERROR("blk type %d not managed by rm\n", type);
		return 0;
	}

	num_blks = 0;
	for (i = 0; i < max_blks; i++) {
		if (hw_to_enc_id[i] != enc_id)
			continue;

		if (num_blks == blks_size) {
			DPU_ERROR("More than %d resources assigned to enc %d\n",
				  blks_size, enc_id);
			break;
		}
		blks[num_blks++] = hw_blks[i];
	}

	return num_blks;
}