/*
 * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include "mdp5_kms.h"
#include "mdp5_ctl.h"
/*
 * CTL - MDP Control Pool Manager
 *
 * Controls are shared between all display interfaces.
 *
 * They are intended to be used for data path configuration.
 * The top level register programming describes the complete data path for
 * a specific data path ID - REG_MDP5_CTL_*(<id>, ...)
 *
 * Hardware capabilities determine the number of concurrent data paths.
 *
 * In certain use cases (high-resolution dual pipe), one single CTL can be
 * shared across multiple CRTCs.
 */
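
/*
 * Illustrative call sequence (a sketch, not code from this file; variable
 * names are placeholders): a CRTC/encoder typically grabs a CTL from the
 * pool and programs its data path roughly as follows:
 *
 *	ctl = mdp5_ctlm_request(ctl_mgr, intf->num);
 *	mdp5_ctl_set_pipeline(ctl, intf, lm);
 *	mdp5_ctl_blend(ctl, stage, stage_cnt, ctl_blend_op_flags);
 *	mdp5_ctl_commit(ctl, flush_mask);
 */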
#define CTL_STAT_BUSY		0x1
#define CTL_STAT_BOOKED		0x2
struct op_mode {
	struct mdp5_interface intf;
	bool encoder_enabled;
	u32 start_mask;
};

struct mdp5_ctl {
	struct mdp5_ctl_manager *ctlm;

	u32 id;
	int lm;

	/* CTL status bitmask */
	u32 status;

	/* Operation Mode Configuration for the Pipeline */
	struct op_mode pipeline;

	/* REG_MDP5_CTL_*(<id>) registers access info + lock: */
	spinlock_t hw_lock;
	u32 reg_offset;

	/* when do CTL registers need to be flushed? (mask of trigger bits) */
	u32 pending_ctl_trigger;

	bool cursor_on;

	/* True if the current CTL has FLUSH bits pending for single FLUSH. */
	bool flush_pending;

	struct mdp5_ctl *pair; /* Paired CTL to be flushed together */
};
struct mdp5_ctl_manager {
	struct drm_device *dev;

	/* number of CTL / Layer Mixers in this hw config: */
	u32 nlm;
	u32 nctl;

	/* to filter out non-present bits in the current hardware config */
	u32 flush_hw_mask;

	/* status for single FLUSH */
	bool single_flush_supported;
	u32 single_flush_pending_mask;

	/* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */
	spinlock_t pool_lock;
	struct mdp5_ctl ctls[MAX_CTL];
};
static inline
struct mdp5_kms *get_kms(struct mdp5_ctl_manager *ctl_mgr)
{
	struct msm_drm_private *priv = ctl_mgr->dev->dev_private;

	return to_mdp5_kms(to_mdp_kms(priv->kms));
}
static inline
void ctl_write(struct mdp5_ctl *ctl, u32 reg, u32 data)
{
	struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);

	(void)ctl->reg_offset; /* TODO use this instead of mdp5_write */
	mdp5_write(mdp5_kms, reg, data);
}
static inline
u32 ctl_read(struct mdp5_ctl *ctl, u32 reg)
{
	struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);

	(void)ctl->reg_offset; /* TODO use this instead of mdp5_read */
	return mdp5_read(mdp5_kms, reg);
}
static void set_display_intf(struct mdp5_kms *mdp5_kms,
		struct mdp5_interface *intf)
{
	unsigned long flags;
	u32 intf_sel;

	spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
	intf_sel = mdp5_read(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0));

	switch (intf->num) {
	case 0:
		intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF0__MASK;
		intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF0(intf->type);
		break;
	case 1:
		intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF1__MASK;
		intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF1(intf->type);
		break;
	case 2:
		intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF2__MASK;
		intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF2(intf->type);
		break;
	case 3:
		intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF3__MASK;
		intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF3(intf->type);
		break;
	default:
		break;
	}

	mdp5_write(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0), intf_sel);
	spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
}
static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
{
	unsigned long flags;
	u32 ctl_op = 0;

	if (!mdp5_cfg_intf_is_virtual(intf->type))
		ctl_op |= MDP5_CTL_OP_INTF_NUM(INTF0 + intf->num);

	switch (intf->type) {
	case INTF_DSI:
		if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
			ctl_op |= MDP5_CTL_OP_CMD_MODE;
		break;

	case INTF_WB:
		if (intf->mode == MDP5_INTF_WB_MODE_LINE)
			ctl_op |= MDP5_CTL_OP_MODE(MODE_WB_2_LINE);
		break;

	default:
		break;
	}

	spin_lock_irqsave(&ctl->hw_lock, flags);
	ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), ctl_op);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);
}
int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl,
		struct mdp5_interface *intf, int lm)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);

	if (unlikely(WARN_ON(intf->num != ctl->pipeline.intf.num))) {
		dev_err(mdp5_kms->dev->dev,
			"CTL %d is allocated by INTF %d, but used by INTF %d\n",
			ctl->id, ctl->pipeline.intf.num, intf->num);
		return -EINVAL;
	}

	ctl->lm = lm;

	memcpy(&ctl->pipeline.intf, intf, sizeof(*intf));

	ctl->pipeline.start_mask = mdp_ctl_flush_mask_lm(ctl->lm) |
				   mdp_ctl_flush_mask_encoder(intf);

	/* Virtual interfaces need not set a display intf (e.g.: Writeback) */
	if (!mdp5_cfg_intf_is_virtual(intf->type))
		set_display_intf(mdp5_kms, intf);

	set_ctl_op(ctl, intf);

	return 0;
}
static bool start_signal_needed(struct mdp5_ctl *ctl)
{
	struct op_mode *pipeline = &ctl->pipeline;

	if (!pipeline->encoder_enabled || pipeline->start_mask != 0)
		return false;

	switch (pipeline->intf.type) {
	case INTF_WB:
		return true;
	case INTF_DSI:
		return pipeline->intf.mode == MDP5_INTF_DSI_MODE_COMMAND;
	default:
		return false;
	}
}
/*
 * send_start_signal() - Overlay Processor Start Signal
 *
 * For a given control operation (display pipeline), a START signal needs to
 * be executed in order to kick off operation and activate all layers.
 * e.g.: DSI command mode, Writeback
 */
static void send_start_signal(struct mdp5_ctl *ctl)
{
	unsigned long flags;

	spin_lock_irqsave(&ctl->hw_lock, flags);
	ctl_write(ctl, REG_MDP5_CTL_START(ctl->id), 1);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);
}
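
/*
 * Note (illustrative): send_start_signal() is only reached via
 * start_signal_needed(), i.e. once the encoder is enabled and every bit in
 * pipeline->start_mask has been flushed; see mdp5_ctl_set_encoder_state()
 * and mdp5_ctl_commit() below.
 */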
static void refill_start_mask(struct mdp5_ctl *ctl)
{
	struct op_mode *pipeline = &ctl->pipeline;
	struct mdp5_interface *intf = &ctl->pipeline.intf;

	pipeline->start_mask = mdp_ctl_flush_mask_lm(ctl->lm);

	/*
	 * The writeback encoder needs to program & flush
	 * address registers for each page flip.
	 */
	if (intf->type == INTF_WB)
		pipeline->start_mask |= mdp_ctl_flush_mask_encoder(intf);
}
/**
 * mdp5_ctl_set_encoder_state() - set the encoder state
 *
 * @enabled: true when the encoder is ready for data streaming; false otherwise.
 *
 * This encoder state is needed to trigger the START signal (data path kickoff).
 */
int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled)
{
	if (WARN_ON(!ctl))
		return -EINVAL;

	ctl->pipeline.encoder_enabled = enabled;
	DBG("intf_%d: %s", ctl->pipeline.intf.num, enabled ? "on" : "off");

	if (start_signal_needed(ctl)) {
		send_start_signal(ctl);
		refill_start_mask(ctl);
	}

	return 0;
}
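
/*
 * Usage sketch (assumed caller, not part of this file): an encoder enable
 * path might do
 *
 *	mdp5_ctl_set_encoder_state(ctl, true);
 *	mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf));
 *
 * so that the pending start_mask bits are cleared and the START trigger
 * can fire.
 */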
/*
 * CTL registers need to be flushed after calling this function
 * (call mdp5_ctl_commit() with the mdp_ctl_flush_mask_cursor() mask).
 */
int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	unsigned long flags;
	u32 blend_cfg;
	int lm = ctl->lm;

	if (unlikely(WARN_ON(lm < 0))) {
		dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d",
				ctl->id, lm);
		return -EINVAL;
	}

	spin_lock_irqsave(&ctl->hw_lock, flags);

	blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm));

	if (enable)
		blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
	else
		blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;

	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
	ctl->cursor_on = enable;

	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id);

	return 0;
}
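
/*
 * Usage sketch (assumed caller): after toggling the cursor, the caller is
 * expected to flush, e.g.
 *
 *	mdp5_ctl_set_cursor(ctl, 0, true);
 *	mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_cursor(0));
 *
 * mdp5_ctl_commit() ORs in MDP5_CTL_FLUSH_CTL automatically because the
 * cursor mask was recorded in ctl->pending_ctl_trigger.
 */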
static u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
		enum mdp_mixer_stage_id stage)
{
	switch (pipe) {
	case SSPP_VIG0: return MDP5_CTL_LAYER_REG_VIG0(stage);
	case SSPP_VIG1: return MDP5_CTL_LAYER_REG_VIG1(stage);
	case SSPP_VIG2: return MDP5_CTL_LAYER_REG_VIG2(stage);
	case SSPP_RGB0: return MDP5_CTL_LAYER_REG_RGB0(stage);
	case SSPP_RGB1: return MDP5_CTL_LAYER_REG_RGB1(stage);
	case SSPP_RGB2: return MDP5_CTL_LAYER_REG_RGB2(stage);
	case SSPP_DMA0: return MDP5_CTL_LAYER_REG_DMA0(stage);
	case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage);
	case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage);
	case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage);
	default:	return 0;
	}
}
static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe,
		enum mdp_mixer_stage_id stage)
{
	if (stage < STAGE6)
		return 0;

	switch (pipe) {
	case SSPP_VIG0: return MDP5_CTL_LAYER_EXT_REG_VIG0_BIT3;
	case SSPP_VIG1: return MDP5_CTL_LAYER_EXT_REG_VIG1_BIT3;
	case SSPP_VIG2: return MDP5_CTL_LAYER_EXT_REG_VIG2_BIT3;
	case SSPP_RGB0: return MDP5_CTL_LAYER_EXT_REG_RGB0_BIT3;
	case SSPP_RGB1: return MDP5_CTL_LAYER_EXT_REG_RGB1_BIT3;
	case SSPP_RGB2: return MDP5_CTL_LAYER_EXT_REG_RGB2_BIT3;
	case SSPP_DMA0: return MDP5_CTL_LAYER_EXT_REG_DMA0_BIT3;
	case SSPP_DMA1: return MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3;
	case SSPP_VIG3: return MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3;
	case SSPP_RGB3: return MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3;
	default:	return 0;
	}
}
int mdp5_ctl_blend(struct mdp5_ctl *ctl, u8 *stage, u32 stage_cnt,
	u32 ctl_blend_op_flags)
{
	unsigned long flags;
	u32 blend_cfg = 0, blend_ext_cfg = 0;
	int i, start_stage;

	if (ctl_blend_op_flags & MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT) {
		start_stage = STAGE0;
		blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
	} else {
		start_stage = STAGE_BASE;
	}

	for (i = start_stage; i < start_stage + stage_cnt; i++) {
		blend_cfg |= mdp_ctl_blend_mask(stage[i], i);
		blend_ext_cfg |= mdp_ctl_blend_ext_mask(stage[i], i);
	}

	spin_lock_irqsave(&ctl->hw_lock, flags);
	if (ctl->cursor_on)
		blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;

	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, ctl->lm), blend_cfg);
	ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, ctl->lm), blend_ext_cfg);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(ctl->lm);

	DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", ctl->lm,
		blend_cfg, blend_ext_cfg);

	return 0;
}
u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf)
{
	if (intf->type == INTF_WB)
		return MDP5_CTL_FLUSH_WB;

	switch (intf->num) {
	case 0: return MDP5_CTL_FLUSH_TIMING_0;
	case 1: return MDP5_CTL_FLUSH_TIMING_1;
	case 2: return MDP5_CTL_FLUSH_TIMING_2;
	case 3: return MDP5_CTL_FLUSH_TIMING_3;
	default: return 0;
	}
}
u32 mdp_ctl_flush_mask_cursor(int cursor_id)
{
	switch (cursor_id) {
	case 0: return MDP5_CTL_FLUSH_CURSOR_0;
	case 1: return MDP5_CTL_FLUSH_CURSOR_1;
	default: return 0;
	}
}
u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe)
{
	switch (pipe) {
	case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
	case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
	case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
	case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
	case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
	case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
	case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
	case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
	case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
	case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
	default: return 0;
	}
}
u32 mdp_ctl_flush_mask_lm(int lm)
{
	switch (lm) {
	case 0:  return MDP5_CTL_FLUSH_LM0;
	case 1:  return MDP5_CTL_FLUSH_LM1;
	case 2:  return MDP5_CTL_FLUSH_LM2;
	case 5:  return MDP5_CTL_FLUSH_LM5;
	default: return 0;
	}
}
static u32 fix_sw_flush(struct mdp5_ctl *ctl, u32 flush_mask)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	u32 sw_mask = 0;
#define BIT_NEEDS_SW_FIX(bit) \
	(!(ctl_mgr->flush_hw_mask & bit) && (flush_mask & bit))

	/* for some targets, cursor bit is the same as LM bit */
	if (BIT_NEEDS_SW_FIX(MDP5_CTL_FLUSH_CURSOR_0))
		sw_mask |= mdp_ctl_flush_mask_lm(ctl->lm);

	return sw_mask;
}
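
/*
 * Example (illustrative): on a target whose flush_hw_mask lacks
 * MDP5_CTL_FLUSH_CURSOR_0, a request to flush the cursor is translated by
 * fix_sw_flush() into the layer mixer flush bit of ctl->lm, which covers
 * the cursor on that hardware.
 */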
static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask,
		u32 *flush_id)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;

	if (ctl->pair) {
		DBG("CTL %d FLUSH pending mask %x", ctl->id, *flush_mask);
		ctl->flush_pending = true;
		ctl_mgr->single_flush_pending_mask |= (*flush_mask);
		*flush_mask = 0;

		if (ctl->pair->flush_pending) {
			*flush_id = min_t(u32, ctl->id, ctl->pair->id);
			*flush_mask = ctl_mgr->single_flush_pending_mask;

			ctl->flush_pending = false;
			ctl->pair->flush_pending = false;
			ctl_mgr->single_flush_pending_mask = 0;

			DBG("Single FLUSH mask %x, ID %d", *flush_mask,
				*flush_id);
		}
	}
}
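
/*
 * Illustrative two-step sequence for paired CTLs (single FLUSH): the first
 * CTL to commit only records its bits (flush_pending = true, mask folded
 * into single_flush_pending_mask) and writes nothing; when its pair commits,
 * the combined mask is written once, to the FLUSH register of the CTL with
 * the lower id.
 */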
/**
 * mdp5_ctl_commit() - Register Flush
 *
 * The flush register is used to indicate that several registers are all
 * programmed, and are safe to update to the back copy of the double
 * buffered registers.
 *
 * Some registers' FLUSH bits are shared when the hardware does not have
 * dedicated bits for them; handling these is the job of fix_sw_flush().
 *
 * CTL registers need to be flushed in some circumstances; if that is the
 * case, some trigger bits will be present in both the flush mask and
 * ctl->pending_ctl_trigger.
 *
 * Return: H/W flushed bit mask.
 */
u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	struct op_mode *pipeline = &ctl->pipeline;
	unsigned long flags;
	u32 flush_id = ctl->id;
	u32 curr_ctl_flush_mask;

	pipeline->start_mask &= ~flush_mask;

	VERB("flush_mask=%x, start_mask=%x, trigger=%x", flush_mask,
			pipeline->start_mask, ctl->pending_ctl_trigger);

	if (ctl->pending_ctl_trigger & flush_mask) {
		flush_mask |= MDP5_CTL_FLUSH_CTL;
		ctl->pending_ctl_trigger = 0;
	}

	flush_mask |= fix_sw_flush(ctl, flush_mask);

	flush_mask &= ctl_mgr->flush_hw_mask;

	curr_ctl_flush_mask = flush_mask;

	fix_for_single_flush(ctl, &flush_mask, &flush_id);

	if (flush_mask) {
		spin_lock_irqsave(&ctl->hw_lock, flags);
		ctl_write(ctl, REG_MDP5_CTL_FLUSH(flush_id), flush_mask);
		spin_unlock_irqrestore(&ctl->hw_lock, flags);
	}

	if (start_signal_needed(ctl)) {
		send_start_signal(ctl);
		refill_start_mask(ctl);
	}

	return curr_ctl_flush_mask;
}
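
/*
 * Usage sketch (assumed caller): a page-flip style update would typically
 * combine the relevant masks before committing, e.g.
 *
 *	u32 flush = mdp_ctl_flush_mask_pipe(pipe) |
 *		    mdp_ctl_flush_mask_lm(lm) |
 *		    mdp_ctl_flush_mask_encoder(intf);
 *	mdp5_ctl_commit(ctl, flush);
 */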
u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl)
{
	return ctl_read(ctl, REG_MDP5_CTL_FLUSH(ctl->id));
}
int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl)
{
	return WARN_ON(!ctl) ? -EINVAL : ctl->id;
}
/*
 * mdp5_ctl_pair() - Associate 2 booked CTLs for single FLUSH
 */
int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable)
{
	struct mdp5_ctl_manager *ctl_mgr = ctlx->ctlm;
	struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);

	/* do nothing silently if hw doesn't support single FLUSH */
	if (!ctl_mgr->single_flush_supported)
		return 0;

	if (!enable) {
		ctlx->pair = NULL;
		ctly->pair = NULL;
		mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0), 0);
		return 0;
	} else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) {
		dev_err(ctl_mgr->dev->dev, "CTLs already paired\n");
		return -EINVAL;
	} else if (!(ctlx->status & ctly->status & CTL_STAT_BOOKED)) {
		dev_err(ctl_mgr->dev->dev, "Only booked CTLs can be paired\n");
		return -EINVAL;
	}

	ctlx->pair = ctly;
	ctly->pair = ctlx;

	mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0),
		MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN);

	return 0;
}
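
/*
 * Usage sketch (assumed caller, dual-DSI split display): after requesting
 * the two booked CTLs for INTF1 and INTF2, the caller pairs them once with
 *
 *	mdp5_ctl_pair(ctl0, ctl1, true);
 *
 * and from then on mdp5_ctl_commit() on either CTL defers to the combined
 * single-FLUSH write performed by fix_for_single_flush().
 */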
/*
 * mdp5_ctlm_request() - CTL allocation
 *
 * Try to return a booked CTL if @intf_num is 1 or 2, an unbooked one for
 * other INTFs. If no CTL is available in the preferred category, allocate
 * from the other one.
 *
 * @return NULL if no CTL is available.
 */
struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
		int intf_num)
{
	struct mdp5_ctl *ctl = NULL;
	const u32 checkm = CTL_STAT_BUSY | CTL_STAT_BOOKED;
	u32 match = ((intf_num == 1) || (intf_num == 2)) ? CTL_STAT_BOOKED : 0;
	unsigned long flags;
	int c;

	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);

	/* search the preferred category */
	for (c = 0; c < ctl_mgr->nctl; c++)
		if ((ctl_mgr->ctls[c].status & checkm) == match)
			goto found;

	dev_warn(ctl_mgr->dev->dev,
		"fall back to the other CTL category for INTF %d!\n", intf_num);

	match ^= CTL_STAT_BOOKED;
	for (c = 0; c < ctl_mgr->nctl; c++)
		if ((ctl_mgr->ctls[c].status & checkm) == match)
			goto found;

	dev_err(ctl_mgr->dev->dev, "No more CTL available!");
	goto unlock;

found:
	ctl = &ctl_mgr->ctls[c];
	ctl->pipeline.intf.num = intf_num;
	ctl->status |= CTL_STAT_BUSY;
	ctl->pending_ctl_trigger = 0;
	DBG("CTL %d allocated", ctl->id);

unlock:
	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
	return ctl;
}
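
/*
 * Example (illustrative): when single FLUSH is supported, CTL0/CTL1 are
 * booked, so a request for INTF1 or INTF2 is served from those first, while
 * a request for any other interface number prefers the unbooked CTLs and
 * only falls back to a booked one when that pool is exhausted.
 */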
void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctl_mgr)
{
	unsigned long flags;
	int c;

	for (c = 0; c < ctl_mgr->nctl; c++) {
		struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];

		spin_lock_irqsave(&ctl->hw_lock, flags);
		ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), 0);
		spin_unlock_irqrestore(&ctl->hw_lock, flags);
	}
}
void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr)
{
	kfree(ctl_mgr);
}
struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
		void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd)
{
	struct mdp5_ctl_manager *ctl_mgr;
	const struct mdp5_cfg_hw *hw_cfg = mdp5_cfg_get_hw_config(cfg_hnd);
	int rev = mdp5_cfg_get_hw_rev(cfg_hnd);
	const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl;
	unsigned long flags;
	int c, ret;

	ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL);
	if (!ctl_mgr) {
		dev_err(dev->dev, "failed to allocate CTL manager\n");
		ret = -ENOMEM;
		goto fail;
	}

	if (unlikely(WARN_ON(ctl_cfg->count > MAX_CTL))) {
		dev_err(dev->dev, "Increase static pool size to at least %d\n",
				ctl_cfg->count);
		ret = -ENOSPC;
		goto fail;
	}

	/* initialize the CTL manager: */
	ctl_mgr->dev = dev;
	ctl_mgr->nlm = hw_cfg->lm.count;
	ctl_mgr->nctl = ctl_cfg->count;
	ctl_mgr->flush_hw_mask = ctl_cfg->flush_hw_mask;
	spin_lock_init(&ctl_mgr->pool_lock);

	/* initialize each CTL of the pool: */
	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
	for (c = 0; c < ctl_mgr->nctl; c++) {
		struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];

		if (WARN_ON(!ctl_cfg->base[c])) {
			dev_err(dev->dev, "CTL_%d: base is null!\n", c);
			ret = -EINVAL;
			spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
			goto fail;
		}
		ctl->ctlm = ctl_mgr;
		ctl->id = c;
		ctl->reg_offset = ctl_cfg->base[c];
		ctl->status = 0;
		spin_lock_init(&ctl->hw_lock);
	}

	/*
	 * In the dual DSI case, CTL0 and CTL1 are always assigned to the two
	 * DSI interfaces to support the single FLUSH feature (CTL0 and CTL1
	 * are flushed together by a single write to CTL0's FLUSH register),
	 * which keeps the two DSI pipes in sync.
	 * Single FLUSH is supported from hw rev v3.0.
	 */
	if (rev >= 3) {
		ctl_mgr->single_flush_supported = true;
		/* Reserve CTL0/1 for INTF1/2 */
		ctl_mgr->ctls[0].status |= CTL_STAT_BOOKED;
		ctl_mgr->ctls[1].status |= CTL_STAT_BOOKED;
	}
	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
	DBG("Pool of %d CTLs created.", ctl_mgr->nctl);

	return ctl_mgr;

fail:
	if (ctl_mgr)
		mdp5_ctlm_destroy(ctl_mgr);

	return ERR_PTR(ret);
}