// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt)	"[drm:%s] " fmt, __func__
#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_pingpong.h"
#include "dpu_hw_intf.h"
#include "dpu_encoder.h"
#include "dpu_trace.h"
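/*
 * A hardware block is free when its enc_id is zero; it counts as
 * reserved by another client when enc_id is set and differs from the
 * requesting encoder's id.
 */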
#define RESERVED_BY_OTHER(h, r) \
	((h)->enc_id && (h)->enc_id != r)
/**
 * struct dpu_rm_requirements - Reservation requirements parameter bundle
 * @topology:	selected topology for the display
 * @hw_res:	Hardware resources required as reported by the encoders
 */
struct dpu_rm_requirements {
	struct msm_display_topology topology;
	struct dpu_encoder_hw_resources hw_res;
};
/**
 * struct dpu_rm_hw_blk - hardware block tracking list member
 * @list:	List head for list of all hardware block tracking items
 * @id:		Hardware ID number, within its own space, ie. LM_X
 * @enc_id:	Encoder id to which this blk is bound
 * @hw:		Pointer to the hardware register access object for this block
 */
struct dpu_rm_hw_blk {
	struct list_head list;
	uint32_t id;
	uint32_t enc_id;
	struct dpu_hw_blk *hw;
};
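/*
 * Iterator helpers: dpu_rm_init_hw_iter() primes an iterator and
 * dpu_rm_get_hw() (or _dpu_rm_get_hw_locked() with rm_lock already
 * held) advances it over all blocks of the given type whose enc_id
 * matches the requested enc_id (enc_id == 0 walks the unreserved
 * blocks).
 *
 * A minimal usage sketch; handle_blk() is a hypothetical consumer:
 *
 *	struct dpu_rm_hw_iter iter;
 *
 *	dpu_rm_init_hw_iter(&iter, enc_id, DPU_HW_BLK_LM);
 *	while (dpu_rm_get_hw(rm, &iter))
 *		handle_blk(iter.blk->hw);
 */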
void dpu_rm_init_hw_iter(
		struct dpu_rm_hw_iter *iter,
		uint32_t enc_id,
		enum dpu_hw_blk_type type)
{
	memset(iter, 0, sizeof(*iter));
	iter->enc_id = enc_id;
	iter->type = type;
}
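/*
 * Advance iterator @i to the next block matching its type and enc_id.
 * Must be called with rm->rm_lock held; dpu_rm_get_hw() below is the
 * locking wrapper for external callers.
 */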
static bool _dpu_rm_get_hw_locked(struct dpu_rm *rm, struct dpu_rm_hw_iter *i)
{
	struct list_head *blk_list;

	if (!rm || !i || i->type >= DPU_HW_BLK_MAX) {
		DPU_ERROR("invalid rm\n");
		return false;
	}

	i->hw = NULL;
	blk_list = &rm->hw_blks[i->type];

	if (i->blk && (&i->blk->list == blk_list)) {
		DPU_DEBUG("attempt resume iteration past last\n");
		return false;
	}

	i->blk = list_prepare_entry(i->blk, blk_list, list);

	list_for_each_entry_continue(i->blk, blk_list, list) {
		if (i->enc_id == i->blk->enc_id) {
			i->hw = i->blk->hw;
			DPU_DEBUG("found type %d id %d for enc %d\n",
					i->type, i->blk->id, i->enc_id);
			return true;
		}
	}

	DPU_DEBUG("no match, type %d for enc %d\n", i->type, i->enc_id);

	return false;
}
bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *i)
{
	bool ret;

	mutex_lock(&rm->rm_lock);
	ret = _dpu_rm_get_hw_locked(rm, i);
	mutex_unlock(&rm->rm_lock);

	return ret;
}
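/*
 * Dispatch to the type-specific destructor. Only block types tracked in
 * the hw_blks lists are expected here; anything else is reported.
 */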
static void _dpu_rm_hw_destroy(enum dpu_hw_blk_type type, void *hw)
{
	switch (type) {
	case DPU_HW_BLK_LM:
		dpu_hw_lm_destroy(hw);
		break;
	case DPU_HW_BLK_CTL:
		dpu_hw_ctl_destroy(hw);
		break;
	case DPU_HW_BLK_PINGPONG:
		dpu_hw_pingpong_destroy(hw);
		break;
	case DPU_HW_BLK_INTF:
		dpu_hw_intf_destroy(hw);
		break;
	case DPU_HW_BLK_SSPP:
		/* SSPPs are not managed by the resource manager */
	case DPU_HW_BLK_TOP:
		/* Top is a singleton, not managed in hw_blks list */
	case DPU_HW_BLK_MAX:
	default:
		DPU_ERROR("unsupported block type %d\n", type);
		break;
	}
}
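/*
 * Tear down all tracking items on every hw_blks list, destroying the
 * underlying hw objects and freeing the items themselves.
 */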
int dpu_rm_destroy(struct dpu_rm *rm)
{
	struct dpu_rm_hw_blk *hw_cur, *hw_nxt;
	enum dpu_hw_blk_type type;

	for (type = 0; type < DPU_HW_BLK_MAX; type++) {
		list_for_each_entry_safe(hw_cur, hw_nxt, &rm->hw_blks[type],
				list) {
			list_del(&hw_cur->list);
			_dpu_rm_hw_destroy(type, hw_cur->hw);
			kfree(hw_cur);
		}
	}

	mutex_destroy(&rm->rm_lock);

	return 0;
}
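/*
 * Construct the hw object for one catalog entry and append a tracking
 * item for it to the matching hw_blks list. Freshly created blocks are
 * unreserved (enc_id == 0, courtesy of kzalloc()).
 */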
static int _dpu_rm_hw_blk_create(
		struct dpu_rm *rm,
		const struct dpu_mdss_cfg *cat,
		void __iomem *mmio,
		enum dpu_hw_blk_type type,
		uint32_t id,
		const void *hw_catalog_info)
{
	struct dpu_rm_hw_blk *blk;
	void *hw;

	switch (type) {
	case DPU_HW_BLK_LM:
		hw = dpu_hw_lm_init(id, mmio, cat);
		break;
	case DPU_HW_BLK_CTL:
		hw = dpu_hw_ctl_init(id, mmio, cat);
		break;
	case DPU_HW_BLK_PINGPONG:
		hw = dpu_hw_pingpong_init(id, mmio, cat);
		break;
	case DPU_HW_BLK_INTF:
		hw = dpu_hw_intf_init(id, mmio, cat);
		break;
	case DPU_HW_BLK_SSPP:
		/* SSPPs are not managed by the resource manager */
	case DPU_HW_BLK_TOP:
		/* Top is a singleton, not managed in hw_blks list */
	case DPU_HW_BLK_MAX:
	default:
		DPU_ERROR("unsupported block type %d\n", type);
		return -EINVAL;
	}

	if (IS_ERR_OR_NULL(hw)) {
		DPU_ERROR("failed hw object creation: type %d, err %ld\n",
				type, PTR_ERR(hw));
		return -EFAULT;
	}

	blk = kzalloc(sizeof(*blk), GFP_KERNEL);
	if (!blk) {
		_dpu_rm_hw_destroy(type, hw);
		return -ENOMEM;
	}

	blk->id = id;
	blk->hw = hw;
	list_add_tail(&blk->list, &rm->hw_blks[type]);

	return 0;
}
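/*
 * Populate the resource manager from the hardware catalog: one tracking
 * item per layer mixer (with a pingpong attached), pingpong, interface
 * and CTL described by @cat. On failure, everything created so far is
 * torn down via dpu_rm_destroy().
 *
 * A minimal bring-up sketch, error handling elided; the cat/mmio locals
 * are assumed to come from the caller's kms setup:
 *
 *	struct dpu_rm rm;
 *
 *	ret = dpu_rm_init(&rm, cat, mmio);
 *	...
 *	dpu_rm_destroy(&rm);
 */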
int dpu_rm_init(struct dpu_rm *rm,
		struct dpu_mdss_cfg *cat,
		void __iomem *mmio)
{
	int rc, i;
	enum dpu_hw_blk_type type;

	if (!rm || !cat || !mmio) {
		DPU_ERROR("invalid kms\n");
		return -EINVAL;
	}

	/* Clear, setup lists */
	memset(rm, 0, sizeof(*rm));

	mutex_init(&rm->rm_lock);

	for (type = 0; type < DPU_HW_BLK_MAX; type++)
		INIT_LIST_HEAD(&rm->hw_blks[type]);

	/* Interrogate HW catalog and create tracking items for hw blocks */
	for (i = 0; i < cat->mixer_count; i++) {
		const struct dpu_lm_cfg *lm = &cat->mixer[i];

		if (lm->pingpong == PINGPONG_MAX) {
			DPU_DEBUG("skip mixer %d without pingpong\n", lm->id);
			continue;
		}

		rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_LM,
				cat->mixer[i].id, &cat->mixer[i]);
		if (rc) {
			DPU_ERROR("failed: lm hw not available\n");
			goto fail;
		}

		if (!rm->lm_max_width) {
			rm->lm_max_width = lm->sblk->maxwidth;
		} else if (rm->lm_max_width != lm->sblk->maxwidth) {
			/*
			 * Don't expect to have hw where lm max widths differ.
			 * If found, take the min.
			 */
			DPU_ERROR("unsupported: lm maxwidth differs\n");
			if (rm->lm_max_width > lm->sblk->maxwidth)
				rm->lm_max_width = lm->sblk->maxwidth;
		}
	}

	for (i = 0; i < cat->pingpong_count; i++) {
		rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_PINGPONG,
				cat->pingpong[i].id, &cat->pingpong[i]);
		if (rc) {
			DPU_ERROR("failed: pp hw not available\n");
			goto fail;
		}
	}

	for (i = 0; i < cat->intf_count; i++) {
		if (cat->intf[i].type == INTF_NONE) {
			DPU_DEBUG("skip intf %d with type none\n", i);
			continue;
		}

		rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_INTF,
				cat->intf[i].id, &cat->intf[i]);
		if (rc) {
			DPU_ERROR("failed: intf hw not available\n");
			goto fail;
		}
	}

	for (i = 0; i < cat->ctl_count; i++) {
		rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_CTL,
				cat->ctl[i].id, &cat->ctl[i]);
		if (rc) {
			DPU_ERROR("failed: ctl hw not available\n");
			goto fail;
		}
	}

	return 0;

fail:
	dpu_rm_destroy(rm);

	return rc;
}
static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
{
	return top->num_intf > 1;
}
/**
 * _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets
 *	proposed use case requirements, incl. hardwired dependent blocks like
 *	pingpong
 * @rm: dpu resource manager handle
 * @enc_id: encoder id requesting for allocation
 * @reqs: proposed use case requirements
 * @lm: proposed layer mixer, function checks if lm, and all other hardwired
 *      blocks connected to the lm (pp), are available and appropriate
 * @pp: output parameter, pingpong block attached to the layer mixer.
 *      NULL if pp was not available, or not matching requirements.
 * @primary_lm: if non-null, this function checks if lm is compatible with
 *              primary_lm as well as satisfying all other requirements
 * Return: true if lm matches all requirements, false otherwise
 */
static bool _dpu_rm_check_lm_and_get_connected_blks(
		struct dpu_rm *rm,
		uint32_t enc_id,
		struct dpu_rm_requirements *reqs,
		struct dpu_rm_hw_blk *lm,
		struct dpu_rm_hw_blk **pp,
		struct dpu_rm_hw_blk *primary_lm)
{
	const struct dpu_lm_cfg *lm_cfg = to_dpu_hw_mixer(lm->hw)->cap;
	struct dpu_rm_hw_iter iter;

	*pp = NULL;

	DPU_DEBUG("check lm %d pp %d\n",
			lm_cfg->id, lm_cfg->pingpong);

	/* Check if this layer mixer is a peer of the proposed primary LM */
	if (primary_lm) {
		const struct dpu_lm_cfg *prim_lm_cfg =
				to_dpu_hw_mixer(primary_lm->hw)->cap;

		if (!test_bit(lm_cfg->id, &prim_lm_cfg->lm_pair_mask)) {
			DPU_DEBUG("lm %d not peer of lm %d\n", lm_cfg->id,
					prim_lm_cfg->id);
			return false;
		}
	}

	/* Already reserved? */
	if (RESERVED_BY_OTHER(lm, enc_id)) {
		DPU_DEBUG("lm %d already reserved\n", lm_cfg->id);
		return false;
	}

	dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_PINGPONG);
	while (_dpu_rm_get_hw_locked(rm, &iter)) {
		if (iter.blk->id == lm_cfg->pingpong) {
			*pp = iter.blk;
			break;
		}
	}

	if (!*pp) {
		DPU_ERROR("failed to get pp on lm %d\n", lm_cfg->pingpong);
		return false;
	}

	if (RESERVED_BY_OTHER(*pp, enc_id)) {
		DPU_DEBUG("lm %d pp %d already reserved\n", lm->id,
				(*pp)->id);
		return false;
	}

	return true;
}
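/*
 * Reserve topology.num_lm layer mixers along with their attached
 * pingpongs. The search is two-level: pick a candidate primary mixer,
 * then collect enough of its free peers (lm_pair_mask) to complete the
 * set; if the set cannot be completed, restart from the next primary
 * candidate.
 */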
static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t enc_id,
			       struct dpu_rm_requirements *reqs)
{
	struct dpu_rm_hw_blk *lm[MAX_BLOCKS];
	struct dpu_rm_hw_blk *pp[MAX_BLOCKS];
	struct dpu_rm_hw_iter iter_i, iter_j;
	int lm_count = 0;
	int i, rc = 0;

	if (!reqs->topology.num_lm) {
		DPU_ERROR("invalid number of lm: %d\n", reqs->topology.num_lm);
		return -EINVAL;
	}

	/* Find a primary mixer */
	dpu_rm_init_hw_iter(&iter_i, 0, DPU_HW_BLK_LM);
	while (lm_count != reqs->topology.num_lm &&
			_dpu_rm_get_hw_locked(rm, &iter_i)) {
		memset(&lm, 0, sizeof(lm));
		memset(&pp, 0, sizeof(pp));

		lm_count = 0;
		lm[lm_count] = iter_i.blk;

		if (!_dpu_rm_check_lm_and_get_connected_blks(
				rm, enc_id, reqs, lm[lm_count],
				&pp[lm_count], NULL))
			continue;

		++lm_count;

		/* Valid primary mixer found, find matching peers */
		dpu_rm_init_hw_iter(&iter_j, 0, DPU_HW_BLK_LM);

		while (lm_count != reqs->topology.num_lm &&
				_dpu_rm_get_hw_locked(rm, &iter_j)) {
			if (iter_i.blk == iter_j.blk)
				continue;

			if (!_dpu_rm_check_lm_and_get_connected_blks(
					rm, enc_id, reqs, iter_j.blk,
					&pp[lm_count], iter_i.blk))
				continue;

			lm[lm_count] = iter_j.blk;
			++lm_count;
		}
	}

	if (lm_count != reqs->topology.num_lm) {
		DPU_DEBUG("unable to find appropriate mixers\n");
		return -ENAVAIL;
	}

	for (i = 0; i < ARRAY_SIZE(lm); i++) {
		if (!lm[i])
			break;

		lm[i]->enc_id = enc_id;
		pp[i]->enc_id = enc_id;

		trace_dpu_rm_reserve_lms(lm[i]->id, enc_id, pp[i]->id);
	}

	return rc;
}
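/*
 * Reserve one CTL per interface in the topology. A CTL candidate only
 * matches when its DPU_CTL_SPLIT_DISPLAY capability agrees with whether
 * the topology actually needs split display (more than one interface).
 */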
static int _dpu_rm_reserve_ctls(
		struct dpu_rm *rm,
		uint32_t enc_id,
		const struct msm_display_topology *top)
{
	struct dpu_rm_hw_blk *ctls[MAX_BLOCKS];
	struct dpu_rm_hw_iter iter;
	int i = 0, num_ctls = 0;
	bool needs_split_display = false;

	memset(&ctls, 0, sizeof(ctls));

	/* each hw_intf needs its own hw_ctrl to program its control path */
	num_ctls = top->num_intf;

	needs_split_display = _dpu_rm_needs_split_display(top);

	dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CTL);
	while (_dpu_rm_get_hw_locked(rm, &iter)) {
		const struct dpu_hw_ctl *ctl = to_dpu_hw_ctl(iter.blk->hw);
		unsigned long features = ctl->caps->features;
		bool has_split_display;

		if (RESERVED_BY_OTHER(iter.blk, enc_id))
			continue;

		has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features;

		DPU_DEBUG("ctl %d caps 0x%lX\n", iter.blk->id, features);

		if (needs_split_display != has_split_display)
			continue;

		ctls[i] = iter.blk;
		DPU_DEBUG("ctl %d match\n", iter.blk->id);

		if (++i == num_ctls)
			break;
	}

	if (i != num_ctls)
		return -ENAVAIL;

	for (i = 0; i < ARRAY_SIZE(ctls) && i < num_ctls; i++) {
		ctls[i]->enc_id = enc_id;
		trace_dpu_rm_reserve_ctls(ctls[i]->id, enc_id);
	}

	return 0;
}
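/*
 * Look up the block of @type with hardware id @id and mark it reserved
 * by @enc_id; fails if another encoder holds it or it does not exist.
 */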
static int _dpu_rm_reserve_intf(
		struct dpu_rm *rm,
		uint32_t enc_id,
		uint32_t id,
		enum dpu_hw_blk_type type)
{
	struct dpu_rm_hw_iter iter;
	int ret = 0;

	/* Find the block entry in the rm, and note the reservation */
	dpu_rm_init_hw_iter(&iter, 0, type);
	while (_dpu_rm_get_hw_locked(rm, &iter)) {
		if (iter.blk->id != id)
			continue;

		if (RESERVED_BY_OTHER(iter.blk, enc_id)) {
			DPU_ERROR("type %d id %d already reserved\n", type, id);
			return -ENAVAIL;
		}

		iter.blk->enc_id = enc_id;
		trace_dpu_rm_reserve_intf(iter.blk->id, enc_id);
		break;
	}

	/* Shouldn't happen since intfs are fixed at probe */
	if (!iter.hw) {
		DPU_ERROR("couldn't find type %d id %d\n", type, id);
		return -EINVAL;
	}

	return ret;
}
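/*
 * Reserve an INTF block for each interface the encoder reports in
 * hw_res->intfs[]; entry i corresponds to hardware id INTF_0 + i.
 */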
static int _dpu_rm_reserve_intf_related_hw(
		struct dpu_rm *rm,
		uint32_t enc_id,
		struct dpu_encoder_hw_resources *hw_res)
{
	int i, ret = 0;
	u32 id;

	for (i = 0; i < ARRAY_SIZE(hw_res->intfs); i++) {
		if (hw_res->intfs[i] == INTF_MODE_NONE)
			continue;
		id = i + INTF_0;
		ret = _dpu_rm_reserve_intf(rm, enc_id, id,
				DPU_HW_BLK_INTF);
		if (ret)
			return ret;
	}

	return ret;
}
static int _dpu_rm_make_reservation(
		struct dpu_rm *rm,
		struct drm_encoder *enc,
		struct drm_crtc_state *crtc_state,
		struct dpu_rm_requirements *reqs)
{
	int ret;

	ret = _dpu_rm_reserve_lms(rm, enc->base.id, reqs);
	if (ret) {
		DPU_ERROR("unable to find appropriate mixers\n");
		return ret;
	}

	ret = _dpu_rm_reserve_ctls(rm, enc->base.id, &reqs->topology);
	if (ret) {
		DPU_ERROR("unable to find appropriate CTL\n");
		return ret;
	}

	ret = _dpu_rm_reserve_intf_related_hw(rm, enc->base.id, &reqs->hw_res);
	if (ret)
		return ret;

	return ret;
}
static int _dpu_rm_populate_requirements(
		struct dpu_rm *rm,
		struct drm_encoder *enc,
		struct drm_crtc_state *crtc_state,
		struct dpu_rm_requirements *reqs,
		struct msm_display_topology req_topology)
{
	dpu_encoder_get_hw_resources(enc, &reqs->hw_res);

	reqs->topology = req_topology;

	DRM_DEBUG_KMS("num_lm: %d num_enc: %d num_intf: %d\n",
		      reqs->topology.num_lm, reqs->topology.num_enc,
		      reqs->topology.num_intf);

	return 0;
}
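/*
 * Return every block held by @enc_id to the free pool by clearing its
 * enc_id. Callers must hold rm->rm_lock.
 */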
static void _dpu_rm_release_reservation(struct dpu_rm *rm, uint32_t enc_id)
{
	struct dpu_rm_hw_blk *blk;
	enum dpu_hw_blk_type type;

	for (type = 0; type < DPU_HW_BLK_MAX; type++) {
		list_for_each_entry(blk, &rm->hw_blks[type], list) {
			if (blk->enc_id == enc_id) {
				blk->enc_id = 0;
				DPU_DEBUG("rel enc %d %d %d\n", enc_id,
					  type, blk->id);
			}
		}
	}
}
void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc)
{
	mutex_lock(&rm->rm_lock);

	_dpu_rm_release_reservation(rm, enc->base.id);

	mutex_unlock(&rm->rm_lock);
}
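/*
 * Reserve the hardware blocks needed to drive @topology on behalf of
 * @enc. A pure page-flip (no modeset on @crtc_state) keeps its existing
 * reservation and returns early. With @test_only set, the reservation
 * is made and then immediately released, so only its feasibility is
 * checked.
 */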
int dpu_rm_reserve(
		struct dpu_rm *rm,
		struct drm_encoder *enc,
		struct drm_crtc_state *crtc_state,
		struct msm_display_topology topology,
		bool test_only)
{
	struct dpu_rm_requirements reqs;
	int ret;

	/* Check if this is just a page-flip */
	if (!drm_atomic_crtc_needs_modeset(crtc_state))
		return 0;

	DRM_DEBUG_KMS("reserving hw for enc %d crtc %d test_only %d\n",
		      enc->base.id, crtc_state->crtc->base.id, test_only);

	mutex_lock(&rm->rm_lock);

	ret = _dpu_rm_populate_requirements(rm, enc, crtc_state, &reqs,
					    topology);
	if (ret) {
		DPU_ERROR("failed to populate hw requirements\n");
		goto end;
	}

	ret = _dpu_rm_make_reservation(rm, enc, crtc_state, &reqs);
	if (ret) {
		DPU_ERROR("failed to reserve hw resources: %d\n", ret);
		_dpu_rm_release_reservation(rm, enc->base.id);
	} else if (test_only) {
		/* test_only: test the reservation and then undo */
		DPU_DEBUG("test_only: discard test [enc: %d]\n",
				enc->base.id);
		_dpu_rm_release_reservation(rm, enc->base.id);
	}

end:
	mutex_unlock(&rm->rm_lock);

	return ret;
}