/*
 * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include "mdp5_kms.h"
#include "mdp5_ctl.h"

/*
 * CTL - MDP Control Pool Manager
 *
 * Controls are shared between all CRTCs.
 *
 * They are intended to be used for data path configuration.
 * The top level register programming describes the complete data path for
 * a specific data path ID - REG_MDP5_CTL_*(<id>, ...)
 *
 * Hardware capabilities determine the number of concurrent data paths.
 *
 * In certain use cases (high-resolution dual pipe), one single CTL can be
 * shared across multiple CRTCs.
 *
 * Because the number of CTLs can be less than the number of CRTCs,
 * CTLs are dynamically allocated from a pool of CTLs, only once a CRTC is
 * requested by the client (in mdp5_crtc_mode_set()).
 */
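
/*
 * Illustrative sketch (hypothetical caller, not part of this file): how a
 * CRTC/encoder pair is expected to use the CTL pool, assuming it already
 * holds a struct mdp5_interface description and a blend configuration.
 * Local variable names below are placeholders.
 *
 *	struct mdp5_ctl *ctl = mdp5_ctlm_request(ctl_mgr, crtc);
 *
 *	mdp5_ctl_set_intf(ctl, &intf);
 *	mdp5_ctl_blend(ctl, lm, blend_cfg);
 *	mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_lm(lm) |
 *			     mdp_ctl_flush_mask_encoder(&intf));
 *	...
 *	mdp5_ctl_release(ctl);		(when the CRTC is torn down)
 */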
struct op_mode {
	struct mdp5_interface intf;

	bool encoder_enabled;
	u32 start_mask;
};

struct mdp5_ctl {
	struct mdp5_ctl_manager *ctlm;

	u32 id;
	int lm;

	/* whether this CTL has been allocated or not: */
	bool busy;

	/* Operation Mode Configuration for the Pipeline */
	struct op_mode pipeline;

	/* REG_MDP5_CTL_*(<id>) registers access info + lock: */
	spinlock_t hw_lock;
	u32 reg_offset;

	/* when do CTL registers need to be flushed? (mask of trigger bits) */
	u32 pending_ctl_trigger;

	bool cursor_on;

	struct drm_crtc *crtc;
};

struct mdp5_ctl_manager {
	struct drm_device *dev;

	/* number of CTL / Layer Mixers in this hw config: */
	u32 nlm;
	u32 nctl;

	/* to filter out non-present bits in the current hardware config */
	u32 flush_hw_mask;

	/* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */
	spinlock_t pool_lock;
	struct mdp5_ctl ctls[MAX_CTL];
};
static inline
struct mdp5_kms *get_kms(struct mdp5_ctl_manager *ctl_mgr)
{
	struct msm_drm_private *priv = ctl_mgr->dev->dev_private;

	return to_mdp5_kms(to_mdp_kms(priv->kms));
}
static inline
void ctl_write(struct mdp5_ctl *ctl, u32 reg, u32 data)
{
	struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);

	(void)ctl->reg_offset; /* TODO use this instead of mdp5_write */
	mdp5_write(mdp5_kms, reg, data);
}
static inline
u32 ctl_read(struct mdp5_ctl *ctl, u32 reg)
{
	struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);

	(void)ctl->reg_offset; /* TODO use this instead of mdp5_read */
	return mdp5_read(mdp5_kms, reg);
}
static void set_display_intf(struct mdp5_kms *mdp5_kms,
		struct mdp5_interface *intf)
{
	unsigned long flags;
	u32 intf_sel;

	spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
	intf_sel = mdp5_read(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0));

	switch (intf->num) {
	case 0:
		intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF0__MASK;
		intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF0(intf->type);
		break;
	case 1:
		intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF1__MASK;
		intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF1(intf->type);
		break;
	case 2:
		intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF2__MASK;
		intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF2(intf->type);
		break;
	case 3:
		intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF3__MASK;
		intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF3(intf->type);
		break;
	default:
		BUG();
		break;
	}

	mdp5_write(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0), intf_sel);
	spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
}
static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
{
	unsigned long flags;
	u32 ctl_op = 0;

	if (!mdp5_cfg_intf_is_virtual(intf->type))
		ctl_op |= MDP5_CTL_OP_INTF_NUM(INTF0 + intf->num);

	switch (intf->type) {
	case INTF_DSI:
		if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
			ctl_op |= MDP5_CTL_OP_CMD_MODE;
		break;

	case INTF_WB:
		if (intf->mode == MDP5_INTF_WB_MODE_LINE)
			ctl_op |= MDP5_CTL_OP_MODE(MODE_WB_2_LINE);
		break;

	default:
		break;
	}

	spin_lock_irqsave(&ctl->hw_lock, flags);
	ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), ctl_op);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);
}
int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);

	memcpy(&ctl->pipeline.intf, intf, sizeof(*intf));

	ctl->pipeline.start_mask = mdp_ctl_flush_mask_lm(ctl->lm) |
				   mdp_ctl_flush_mask_encoder(intf);

	/* Virtual interfaces need not set a display intf (e.g.: Writeback) */
	if (!mdp5_cfg_intf_is_virtual(intf->type))
		set_display_intf(mdp5_kms, intf);

	set_ctl_op(ctl, intf);

	return 0;
}
static bool start_signal_needed(struct mdp5_ctl *ctl)
{
	struct op_mode *pipeline = &ctl->pipeline;

	if (!pipeline->encoder_enabled || pipeline->start_mask != 0)
		return false;

	switch (pipeline->intf.type) {
	case INTF_WB:
		return true;

	case INTF_DSI:
		return pipeline->intf.mode == MDP5_INTF_DSI_MODE_COMMAND;

	default:
		return false;
	}
}
/*
 * send_start_signal() - Overlay Processor Start Signal
 *
 * For a given control operation (display pipeline), a START signal needs to be
 * triggered in order to kick off operation and activate all layers.
 * e.g.: DSI command mode, Writeback
 */
static void send_start_signal(struct mdp5_ctl *ctl)
{
	unsigned long flags;

	spin_lock_irqsave(&ctl->hw_lock, flags);
	ctl_write(ctl, REG_MDP5_CTL_START(ctl->id), 1);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);
}
static void refill_start_mask(struct mdp5_ctl *ctl)
{
	struct op_mode *pipeline = &ctl->pipeline;
	struct mdp5_interface *intf = &ctl->pipeline.intf;

	pipeline->start_mask = mdp_ctl_flush_mask_lm(ctl->lm);

	/*
	 * Writeback encoder needs to program & flush
	 * address registers for each page flip.
	 */
	if (intf->type == INTF_WB)
		pipeline->start_mask |= mdp_ctl_flush_mask_encoder(intf);
}
/**
 * mdp5_ctl_set_encoder_state() - set the encoder state
 *
 * @enabled: true, when encoder is ready for data streaming; false, otherwise.
 *
 * Note:
 * This encoder state is needed to trigger the START signal (data path kickoff).
 */
int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled)
{
	if (WARN_ON(!ctl))
		return -EINVAL;

	ctl->pipeline.encoder_enabled = enabled;
	DBG("intf_%d: %s", ctl->pipeline.intf.num, enabled ? "on" : "off");

	if (start_signal_needed(ctl)) {
		send_start_signal(ctl);
		refill_start_mask(ctl);
	}

	return 0;
}
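
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a command
 * mode or writeback encoder typically flips this state around enable/disable,
 * so that a subsequent mdp5_ctl_commit() can issue the START kickoff once all
 * of the pipeline's start_mask bits have been flushed:
 *
 *	mdp5_ctl_set_encoder_state(ctl, true);
 *	...
 *	mdp5_ctl_commit(ctl, flush_mask);	(may trigger START)
 *	...
 *	mdp5_ctl_set_encoder_state(ctl, false);
 */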
/*
 * Note:
 * CTL registers need to be flushed after calling this function
 * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
 */
int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	unsigned long flags;
	u32 blend_cfg;
	int lm = ctl->lm;

	if (unlikely(WARN_ON(lm < 0))) {
		dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d",
				ctl->id, lm);
		return -EINVAL;
	}

	spin_lock_irqsave(&ctl->hw_lock, flags);

	blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm));

	if (enable)
		blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
	else
		blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;

	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);

	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id);
	ctl->cursor_on = enable;

	return 0;
}
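
/*
 * Illustrative sketch (hypothetical caller, not part of this file): enabling
 * the cursor and then flushing, as required by the note above. The exact
 * flush mask depends on what else was reprogrammed; here only the cursor is:
 *
 *	mdp5_ctl_set_cursor(ctl, 0, true);
 *	mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_cursor(0));
 *
 * mdp5_ctl_commit() notices the pending trigger left by mdp5_ctl_set_cursor()
 * and ORs in MDP5_CTL_FLUSH_CTL on its own.
 */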
int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg)
{
	unsigned long flags;

	if (ctl->cursor_on)
		blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
	else
		blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;

	spin_lock_irqsave(&ctl->hw_lock, flags);
	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(lm);

	return 0;
}
u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf)
{
	if (intf->type == INTF_WB)
		return MDP5_CTL_FLUSH_WB;

	switch (intf->num) {
	case 0: return MDP5_CTL_FLUSH_TIMING_0;
	case 1: return MDP5_CTL_FLUSH_TIMING_1;
	case 2: return MDP5_CTL_FLUSH_TIMING_2;
	case 3: return MDP5_CTL_FLUSH_TIMING_3;
	default: return 0;
	}
}
u32 mdp_ctl_flush_mask_cursor(int cursor_id)
{
	switch (cursor_id) {
	case 0: return MDP5_CTL_FLUSH_CURSOR_0;
	case 1: return MDP5_CTL_FLUSH_CURSOR_1;
	default: return 0;
	}
}
u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe)
{
	switch (pipe) {
	case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
	case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
	case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
	case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
	case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
	case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
	case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
	case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
	case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
	case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
	default:        return 0;
	}
}
u32 mdp_ctl_flush_mask_lm(int lm)
{
	switch (lm) {
	case 0:  return MDP5_CTL_FLUSH_LM0;
	case 1:  return MDP5_CTL_FLUSH_LM1;
	case 2:  return MDP5_CTL_FLUSH_LM2;
	case 5:  return MDP5_CTL_FLUSH_LM5;
	default: return 0;
	}
}
static u32 fix_sw_flush(struct mdp5_ctl *ctl, u32 flush_mask)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	u32 sw_mask = 0;
#define BIT_NEEDS_SW_FIX(bit) \
	(!(ctl_mgr->flush_hw_mask & bit) && (flush_mask & bit))

	/* for some targets, cursor bit is the same as LM bit */
	if (BIT_NEEDS_SW_FIX(MDP5_CTL_FLUSH_CURSOR_0))
		sw_mask |= mdp_ctl_flush_mask_lm(ctl->lm);

	return sw_mask;
}
/**
 * mdp5_ctl_commit() - Register Flush
 *
 * The flush register is used to indicate several registers are all
 * programmed, and are safe to update to the back copy of the double
 * buffered registers.
 *
 * Some registers FLUSH bits are shared when the hardware does not have
 * dedicated bits for them; handling these is the job of fix_sw_flush().
 *
 * CTL registers need to be flushed in some circumstances; if that is the
 * case, some trigger bits will be present in both flush mask and
 * ctl->pending_ctl_trigger.
 */
int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	struct op_mode *pipeline = &ctl->pipeline;
	unsigned long flags;

	pipeline->start_mask &= ~flush_mask;

	VERB("flush_mask=%x, start_mask=%x, trigger=%x", flush_mask,
			pipeline->start_mask, ctl->pending_ctl_trigger);

	if (ctl->pending_ctl_trigger & flush_mask) {
		flush_mask |= MDP5_CTL_FLUSH_CTL;
		ctl->pending_ctl_trigger = 0;
	}

	flush_mask |= fix_sw_flush(ctl, flush_mask);

	flush_mask &= ctl_mgr->flush_hw_mask;

	if (flush_mask) {
		spin_lock_irqsave(&ctl->hw_lock, flags);
		ctl_write(ctl, REG_MDP5_CTL_FLUSH(ctl->id), flush_mask);
		spin_unlock_irqrestore(&ctl->hw_lock, flags);
	}

	if (start_signal_needed(ctl)) {
		send_start_signal(ctl);
		refill_start_mask(ctl);
	}

	return 0;
}
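
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a typical
 * page flip builds its flush mask from the helpers above, covering every block
 * it reprogrammed, then commits once:
 *
 *	u32 flush_mask = mdp_ctl_flush_mask_pipe(SSPP_RGB0) |
 *			 mdp_ctl_flush_mask_lm(lm) |
 *			 mdp_ctl_flush_mask_encoder(&intf);
 *
 *	mdp5_ctl_commit(ctl, flush_mask);
 *
 * Bits not backed by this hardware revision are either dropped (flush_hw_mask)
 * or remapped by fix_sw_flush() before the FLUSH register is written.
 */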
void mdp5_ctl_release(struct mdp5_ctl *ctl)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	unsigned long flags;

	if (unlikely(WARN_ON(ctl->id >= MAX_CTL) || !ctl->busy)) {
		dev_err(ctl_mgr->dev->dev, "CTL %d in bad state (%d)",
				ctl->id, ctl->busy);
		return;
	}

	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
	ctl->busy = false;
	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);

	DBG("CTL %d released", ctl->id);
}
int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl)
{
	return WARN_ON(!ctl) ? -EINVAL : ctl->id;
}
/*
 * mdp5_ctlm_request() - CTL dynamic allocation
 *
 * Note: Current implementation considers that we can only have one CRTC per CTL
 *
 * @return first free CTL
 */
struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
		struct drm_crtc *crtc)
{
	struct mdp5_ctl *ctl = NULL;
	unsigned long flags;
	int c;

	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);

	for (c = 0; c < ctl_mgr->nctl; c++)
		if (!ctl_mgr->ctls[c].busy)
			break;

	if (unlikely(c >= ctl_mgr->nctl)) {
		dev_err(ctl_mgr->dev->dev, "No more CTL available!");
		goto unlock;
	}

	ctl = &ctl_mgr->ctls[c];

	ctl->lm = mdp5_crtc_get_lm(crtc);
	ctl->crtc = crtc;
	ctl->busy = true;
	ctl->pending_ctl_trigger = 0;
	DBG("CTL %d allocated", ctl->id);

unlock:
	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
	return ctl;
}
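
/*
 * Illustrative sketch (hypothetical caller, not part of this file): the CRTC
 * grabs a CTL when its mode is set and keeps it until it is torn down:
 *
 *	ctl = mdp5_ctlm_request(ctl_mgr, crtc);
 *	if (!ctl)
 *		return -EBUSY;		(pool exhausted; pick an error that fits)
 *	...
 *	mdp5_ctl_release(ctl);
 */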
void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctl_mgr)
{
	unsigned long flags;
	int c;

	for (c = 0; c < ctl_mgr->nctl; c++) {
		struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];

		spin_lock_irqsave(&ctl->hw_lock, flags);
		ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), 0);
		spin_unlock_irqrestore(&ctl->hw_lock, flags);
	}
}
void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr)
{
	kfree(ctl_mgr);
}
struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
		void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg)
{
	struct mdp5_ctl_manager *ctl_mgr;
	const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl;
	unsigned long flags;
	int c, ret;

	ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL);
	if (!ctl_mgr) {
		dev_err(dev->dev, "failed to allocate CTL manager\n");
		ret = -ENOMEM;
		goto fail;
	}

	if (unlikely(WARN_ON(ctl_cfg->count > MAX_CTL))) {
		dev_err(dev->dev, "Increase static pool size to at least %d\n",
				ctl_cfg->count);
		ret = -ENOSPC;
		goto fail;
	}

	/* initialize the CTL manager: */
	ctl_mgr->dev = dev;
	ctl_mgr->nlm = hw_cfg->lm.count;
	ctl_mgr->nctl = ctl_cfg->count;
	ctl_mgr->flush_hw_mask = ctl_cfg->flush_hw_mask;
	spin_lock_init(&ctl_mgr->pool_lock);

	/* initialize each CTL of the pool: */
	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
	for (c = 0; c < ctl_mgr->nctl; c++) {
		struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];

		if (WARN_ON(!ctl_cfg->base[c])) {
			dev_err(dev->dev, "CTL_%d: base is null!\n", c);
			ret = -EINVAL;
			goto fail;
		}
		ctl->ctlm = ctl_mgr;
		ctl->id = c;
		ctl->reg_offset = ctl_cfg->base[c];
		ctl->busy = false;
		spin_lock_init(&ctl->hw_lock);
	}
	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
	DBG("Pool of %d CTLs created.", ctl_mgr->nctl);

	return ctl_mgr;

fail:
	if (ctl_mgr)
		mdp5_ctlm_destroy(ctl_mgr);

	return ERR_PTR(ret);
}