// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/sort.h>

#include <drm/drm_atomic.h>
#include <drm/drm_mode.h>
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "mdp5_kms.h"
#include "msm_gem.h"
#define CURSOR_WIDTH	64
#define CURSOR_HEIGHT	64

struct mdp5_crtc {
	struct drm_crtc base;
	int id;
	bool enabled;

	spinlock_t lm_lock;	/* protect REG_MDP5_LM_* registers */

	/* if there is a pending flip, these will be non-null: */
	struct drm_pending_vblank_event *event;

	/* Bits have been flushed at the last commit,
	 * used to decide if a vsync has happened since last commit.
	 */
	u32 flushed_mask;

#define PENDING_CURSOR 0x1
#define PENDING_FLIP   0x2
	atomic_t pending;

	/* for unref'ing cursor bo's after scanout completes: */
	struct drm_flip_work unref_cursor_work;

	struct mdp_irq vblank;
	struct mdp_irq err;
	struct mdp_irq pp_done;

	struct completion pp_completion;

	bool lm_cursor_enabled;

	struct {
		/* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo*/
		spinlock_t lock;

		/* current cursor being scanned out: */
		struct drm_gem_object *scanout_bo;
		uint64_t iova;
		uint32_t width, height;
		int x, y;
	} cursor;
};
#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)
static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc);

static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;
	return to_mdp5_kms(to_mdp_kms(priv->kms));
}
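
/*
 * Pending-bit protocol: request_pending() ORs PENDING_CURSOR/PENDING_FLIP
 * into mdp5_crtc->pending and arms the vblank irq; mdp5_crtc_vblank_irq()
 * consumes the bits with atomic_xchg() and then completes the flip and/or
 * commits the cursor-bo unref work.
 */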
static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	atomic_or(pending, &mdp5_crtc->pending);
	mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
}
static void request_pp_done_pending(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	reinit_completion(&mdp5_crtc->pp_completion);
}
static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	bool start = !mdp5_cstate->defer_start;

	mdp5_cstate->defer_start = false;

	DBG("%s: flush=%08x", crtc->name, flush_mask);

	return mdp5_ctl_commit(ctl, pipeline, flush_mask, start);
}
/*
 * flush updates, to make sure hw is updated to new scanout fb,
 * so that we can safely queue unref to current fb (ie. next
 * vblank we know hw is done w/ previous scanout_fb).
 */
static u32 crtc_flush_all(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_hw_mixer *mixer, *r_mixer;
	struct drm_plane *plane;
	uint32_t flush_mask = 0;

	/* this should not happen: */
	if (WARN_ON(!mdp5_cstate->ctl))
		return 0;

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		if (!plane->state->visible)
			continue;
		flush_mask |= mdp5_plane_get_flush(plane);
	}

	mixer = mdp5_cstate->pipeline.mixer;
	flush_mask |= mdp_ctl_flush_mask_lm(mixer->lm);

	r_mixer = mdp5_cstate->pipeline.r_mixer;
	if (r_mixer)
		flush_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm);

	return crtc_flush(crtc, flush_mask);
}
/* if file!=NULL, this is preclose potential cancel-flip path */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	struct drm_device *dev = crtc->dev;
	struct drm_pending_vblank_event *event;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	event = mdp5_crtc->event;
	if (event) {
		mdp5_crtc->event = NULL;
		DBG("%s: send event: %p", crtc->name, event);
		drm_crtc_send_vblank_event(crtc, event);
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (ctl && !crtc->state->enable) {
		/* set STAGE_UNUSED for all layers */
		mdp5_ctl_blend(ctl, pipeline, NULL, NULL, 0, 0);
		/* XXX: What to do here? */
		/* mdp5_crtc->ctl = NULL; */
	}
}
static void unref_cursor_worker(struct drm_flip_work *work, void *val)
{
	struct mdp5_crtc *mdp5_crtc =
		container_of(work, struct mdp5_crtc, unref_cursor_work);
	struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
	struct msm_kms *kms = &mdp5_kms->base.base;

	msm_gem_unpin_iova(val, kms->aspace);
	drm_gem_object_put(val);
}
static void mdp5_crtc_destroy(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	drm_crtc_cleanup(crtc);
	drm_flip_work_cleanup(&mdp5_crtc->unref_cursor_work);

	kfree(mdp5_crtc);
}
static inline u32 mdp5_lm_use_fg_alpha_mask(enum mdp_mixer_stage_id stage)
{
	switch (stage) {
	case STAGE0: return MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA;
	case STAGE1: return MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA;
	case STAGE2: return MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA;
	case STAGE3: return MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA;
	case STAGE4: return MDP5_LM_BLEND_COLOR_OUT_STAGE4_FG_ALPHA;
	case STAGE5: return MDP5_LM_BLEND_COLOR_OUT_STAGE5_FG_ALPHA;
	case STAGE6: return MDP5_LM_BLEND_COLOR_OUT_STAGE6_FG_ALPHA;
	default:
		return 0;
	}
}
/*
 * left/right pipe offsets for the stage array used in blend_setup()
 */
#define PIPE_LEFT	0
#define PIPE_RIGHT	1
/*
 * blend_setup() - blend all the planes of a CRTC
 *
 * If no base layer is available, border will be enabled as the base layer.
 * Otherwise all layers will be blended based on their stage calculated
 * in mdp5_crtc_atomic_check.
 */
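/*
 * Rough sketch of the per-stage equations programmed below (out = blended
 * output, fg/bg = stage foreground/background, a_fg = foreground pixel
 * alpha, c = constant plane alpha), assuming the usual reading of the
 * FG_CONST/FG_PIXEL/BG_CONST register fields:
 *
 *   opaque format:        out = fg * c          + bg * (0xff - c)
 *   premultiplied alpha:  out = fg * c          + bg * (1 - a_fg [* c])
 *   straight alpha:       out = fg * a_fg [* c] + bg * (1 - a_fg [* c])
 *
 * where the [* c] factors only apply when the plane's constant alpha is
 * not 0xff (the *_MOD_ALPHA bits below).
 */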
static void blend_setup(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_plane *plane;
	struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL};
	const struct mdp_format *format;
	struct mdp5_hw_mixer *mixer = pipeline->mixer;
	uint32_t lm = mixer->lm;
	struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
	uint32_t r_lm = r_mixer ? r_mixer->lm : 0;
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
	unsigned long flags;
	enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
	enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
	int i, plane_cnt = 0;
	bool bg_alpha_enabled = false;
	u32 mixer_op_mode = 0;
	u32 val;
#define blender(stage)	((stage) - STAGE0)

	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);

	/* ctl could be released already when we are shutting down: */
	/* XXX: Can this happen now? */
	if (!ctl)
		goto out;

	/* Collect all plane information */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		enum mdp5_pipe right_pipe;

		if (!plane->state->visible)
			continue;

		pstate = to_mdp5_plane_state(plane->state);
		pstates[pstate->stage] = pstate;
		stage[pstate->stage][PIPE_LEFT] = mdp5_plane_pipe(plane);
		/*
		 * if we have a right mixer, stage the same pipe as we
		 * have on the left mixer
		 */
		if (r_mixer)
			r_stage[pstate->stage][PIPE_LEFT] =
				mdp5_plane_pipe(plane);
		/*
		 * if we have a right pipe (i.e, the plane comprises of 2
		 * hwpipes), then stage the right pipe on the right side of both
		 * the layer mixers
		 */
		right_pipe = mdp5_plane_right_pipe(plane);
		if (right_pipe != SSPP_NONE) {
			stage[pstate->stage][PIPE_RIGHT] = right_pipe;
			r_stage[pstate->stage][PIPE_RIGHT] = right_pipe;
		}

		plane_cnt++;
	}

	if (!pstates[STAGE_BASE]) {
		ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT;
		DBG("Border Color is enabled");
	} else if (plane_cnt) {
		format = to_mdp_format(msm_framebuffer_format(pstates[STAGE_BASE]->base.fb));

		if (format->alpha_enable)
			bg_alpha_enabled = true;
	}

	/* The reset for blending */
	for (i = STAGE0; i <= STAGE_MAX; i++) {
		if (!pstates[i])
			continue;

		format = to_mdp_format(
			msm_framebuffer_format(pstates[i]->base.fb));
		plane = pstates[i]->base.plane;
		blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
			MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST);
		fg_alpha = pstates[i]->alpha;
		bg_alpha = 0xFF - pstates[i]->alpha;

		if (!format->alpha_enable && bg_alpha_enabled)
			mixer_op_mode = 0;
		else
			mixer_op_mode |= mdp5_lm_use_fg_alpha_mask(i);

		DBG("Stage %d fg_alpha %x bg_alpha %x", i, fg_alpha, bg_alpha);

		if (format->alpha_enable && pstates[i]->premultiplied) {
			blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
			if (fg_alpha != 0xff) {
				bg_alpha = fg_alpha;
				blend_op |=
					MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
					MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
			} else {
				blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
			}
		} else if (format->alpha_enable) {
			blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_PIXEL) |
				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
			if (fg_alpha != 0xff) {
				bg_alpha = fg_alpha;
				blend_op |=
					MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA |
					MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA |
					MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
					MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
			} else {
				blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
			}
		}

		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(lm,
				blender(i)), blend_op);
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
				blender(i)), fg_alpha);
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
				blender(i)), bg_alpha);
		if (r_mixer) {
			mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(r_lm,
					blender(i)), blend_op);
			mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(r_lm,
					blender(i)), fg_alpha);
			mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(r_lm,
					blender(i)), bg_alpha);
		}
	}

	val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm),
		   val | mixer_op_mode);
	if (r_mixer) {
		val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm),
			   val | mixer_op_mode);
	}

	mdp5_ctl_blend(ctl, pipeline, stage, r_stage, plane_cnt,
		       ctl_blend_flags);
out:
	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}
static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct mdp5_hw_mixer *mixer = mdp5_cstate->pipeline.mixer;
	struct mdp5_hw_mixer *r_mixer = mdp5_cstate->pipeline.r_mixer;
	uint32_t lm = mixer->lm;
	u32 mixer_width, val;
	unsigned long flags;
	struct drm_display_mode *mode;

	if (WARN_ON(!crtc->state))
		return;

	mode = &crtc->state->adjusted_mode;

	DBG("%s: set mode: " DRM_MODE_FMT, crtc->name, DRM_MODE_ARG(mode));

	mixer_width = mode->hdisplay;
	if (r_mixer)
		mixer_width /= 2;

	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
	mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(lm),
			MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
			MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));

	/* Assign mixer to LEFT side in source split mode */
	val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
	val &= ~MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm), val);

	if (r_mixer) {
		u32 r_lm = r_mixer->lm;

		mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(r_lm),
				MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
				MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));

		/* Assign mixer to RIGHT side in source split mode */
		val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
		val |= MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm), val);
	}

	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}
static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;

	drm_for_each_encoder(encoder, dev)
		if (encoder->crtc == crtc)
			return encoder;

	return NULL;
}
static bool mdp5_crtc_get_scanout_position(struct drm_crtc *crtc,
					   bool in_vblank_irq,
					   int *vpos, int *hpos,
					   ktime_t *stime, ktime_t *etime,
					   const struct drm_display_mode *mode)
{
	unsigned int pipe = crtc->index;
	struct drm_encoder *encoder;
	int line, vsw, vbp, vactive_start, vactive_end, vfp_end;

	encoder = get_encoder_from_crtc(crtc);
	if (!encoder) {
		DRM_ERROR("no encoder found for crtc %d\n", pipe);
		return false;
	}

	vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
	vbp = mode->crtc_vtotal - mode->crtc_vsync_end;

	/*
	 * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at
	 * the end of VFP. Translate the porch values relative to the line
	 * counter positions.
	 */

	vactive_start = vsw + vbp + 1;

	vactive_end = vactive_start + mode->crtc_vdisplay;

	/* last scan line before VSYNC */
	vfp_end = mode->crtc_vtotal;
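
	/*
	 * Illustrative numbers (not from the original source): for a 1080p
	 * mode with vsw = 5 and vbp = 36, vactive_start = 42 and
	 * vactive_end = 1122, so a raw line count of 42 maps to vpos 0,
	 * while counts inside the blanking region map to negative vpos in
	 * the translation below.
	 */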
	if (stime)
		*stime = ktime_get();

	line = mdp5_encoder_get_linecount(encoder);

	if (line < vactive_start)
		line -= vactive_start;
	else if (line > vactive_end)
		line = line - vfp_end - vactive_start;
	else
		line -= vactive_start;

	*vpos = line;
	*hpos = 0;

	if (etime)
		*etime = ktime_get();

	return true;
}
static u32 mdp5_crtc_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;

	encoder = get_encoder_from_crtc(crtc);
	if (!encoder)
		return 0;

	return mdp5_encoder_get_framecount(encoder);
}
static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
				     struct drm_atomic_state *state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct device *dev = &mdp5_kms->pdev->dev;
	unsigned long flags;

	DBG("%s", crtc->name);

	if (WARN_ON(!mdp5_crtc->enabled))
		return;

	/* Disable/save vblank irq handling before power is disabled */
	drm_crtc_vblank_off(crtc);

	if (mdp5_cstate->cmd_mode)
		mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done);

	mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
	pm_runtime_put_sync(dev);

	if (crtc->state->event && !crtc->state->active) {
		WARN_ON(mdp5_crtc->event);
		spin_lock_irqsave(&mdp5_kms->dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&mdp5_kms->dev->event_lock, flags);
	}

	mdp5_crtc->enabled = false;
}
static void mdp5_crtc_vblank_on(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_interface *intf = mdp5_cstate->pipeline.intf;
	u32 count;
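
	/*
	 * Presumably a max vblank count of 0 tells the DRM core there is no
	 * usable hw frame counter, so it falls back to timestamp-based
	 * vblank counting; DSI command mode has no free-running frame
	 * counter, while video mode interfaces do (hence 0xffffffff).
	 */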
	count = intf->mode == MDP5_INTF_DSI_MODE_COMMAND ? 0 : 0xffffffff;
	drm_crtc_set_max_vblank_count(crtc, count);

	drm_crtc_vblank_on(crtc);
}
static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
				    struct drm_atomic_state *state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct device *dev = &mdp5_kms->pdev->dev;

	DBG("%s", crtc->name);

	if (WARN_ON(mdp5_crtc->enabled))
		return;

	pm_runtime_get_sync(dev);

	if (mdp5_crtc->lm_cursor_enabled) {
		/*
		 * Restore LM cursor state, as it might have been lost
		 * with suspend:
		 */
		if (mdp5_crtc->cursor.iova) {
			unsigned long flags;

			spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
			mdp5_crtc_restore_cursor(crtc);
			spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

			mdp5_ctl_set_cursor(mdp5_cstate->ctl,
					    &mdp5_cstate->pipeline, 0, true);
		} else {
			mdp5_ctl_set_cursor(mdp5_cstate->ctl,
					    &mdp5_cstate->pipeline, 0, false);
		}
	}

	/* Restore vblank irq handling after power is enabled */
	mdp5_crtc_vblank_on(crtc);

	mdp5_crtc_mode_set_nofb(crtc);

	mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);

	if (mdp5_cstate->cmd_mode)
		mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->pp_done);

	mdp5_crtc->enabled = true;
}
static int mdp5_crtc_setup_pipeline(struct drm_crtc *crtc,
				    struct drm_crtc_state *new_crtc_state,
				    bool need_right_mixer)
{
	struct mdp5_crtc_state *mdp5_cstate =
			to_mdp5_crtc_state(new_crtc_state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_interface *intf;
	bool new_mixer = false;

	new_mixer = !pipeline->mixer;

	if ((need_right_mixer && !pipeline->r_mixer) ||
	    (!need_right_mixer && pipeline->r_mixer))
		new_mixer = true;

	if (new_mixer) {
		struct mdp5_hw_mixer *old_mixer = pipeline->mixer;
		struct mdp5_hw_mixer *old_r_mixer = pipeline->r_mixer;
		u32 caps;
		int ret;

		caps = MDP_LM_CAP_DISPLAY;
		if (need_right_mixer)
			caps |= MDP_LM_CAP_PAIR;

		ret = mdp5_mixer_assign(new_crtc_state->state, crtc, caps,
					&pipeline->mixer, need_right_mixer ?
					&pipeline->r_mixer : NULL);
		if (ret)
			return ret;

		mdp5_mixer_release(new_crtc_state->state, old_mixer);
		if (old_r_mixer) {
			mdp5_mixer_release(new_crtc_state->state, old_r_mixer);
			if (!need_right_mixer)
				pipeline->r_mixer = NULL;
		}
	}

	/*
	 * these should have been already set up in the encoder's atomic
	 * check (called by drm_atomic_helper_check_modeset)
	 */
	intf = pipeline->intf;

	mdp5_cstate->err_irqmask = intf2err(intf->num);
	mdp5_cstate->vblank_irqmask = intf2vblank(pipeline->mixer, intf);

	if ((intf->type == INTF_DSI) &&
	    (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) {
		mdp5_cstate->pp_done_irqmask = lm2ppdone(pipeline->mixer);
		mdp5_cstate->cmd_mode = true;
	} else {
		mdp5_cstate->pp_done_irqmask = 0;
		mdp5_cstate->cmd_mode = false;
	}

	return 0;
}
struct plane_state {
	struct drm_plane *plane;
	struct mdp5_plane_state *state;
};

static int pstate_cmp(const void *a, const void *b)
{
	struct plane_state *pa = (struct plane_state *)a;
	struct plane_state *pb = (struct plane_state *)b;
	return pa->state->zpos - pb->state->zpos;
}
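
/*
 * Illustrative example: three visible planes with zpos 5, 2 and 7 sort to
 * {2, 5, 7}; mdp5_crtc_atomic_check() below then assigns them consecutive
 * mixer stages starting at get_start_stage(), e.g. STAGE_BASE, STAGE0,
 * STAGE1.
 */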
/* is there a helper for this? */
static bool is_fullscreen(struct drm_crtc_state *cstate,
		struct drm_plane_state *pstate)
{
	return (pstate->crtc_x <= 0) && (pstate->crtc_y <= 0) &&
		((pstate->crtc_x + pstate->crtc_w) >= cstate->mode.hdisplay) &&
		((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay);
}
static enum mdp_mixer_stage_id get_start_stage(struct drm_crtc *crtc,
					struct drm_crtc_state *new_crtc_state,
					struct drm_plane_state *bpstate)
{
	struct mdp5_crtc_state *mdp5_cstate =
			to_mdp5_crtc_state(new_crtc_state);

	/*
	 * if we're in source split mode, it's mandatory to have
	 * border out on the base stage
	 */
	if (mdp5_cstate->pipeline.r_mixer)
		return STAGE0;

	/* if the bottom-most layer is not fullscreen, we need to use
	 * it for solid-color:
	 */
	if (!is_fullscreen(new_crtc_state, bpstate))
		return STAGE0;

	return STAGE_BASE;
}
static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_plane *plane;
	struct drm_device *dev = crtc->dev;
	struct plane_state pstates[STAGE_MAX + 1];
	const struct mdp5_cfg_hw *hw_cfg;
	const struct drm_plane_state *pstate;
	const struct drm_display_mode *mode = &crtc_state->adjusted_mode;
	bool cursor_plane = false;
	bool need_right_mixer = false;
	int cnt = 0, i;
	int ret;
	enum mdp_mixer_stage_id start;

	DBG("%s: check", crtc->name);

	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
		if (!pstate->visible)
			continue;

		pstates[cnt].plane = plane;
		pstates[cnt].state = to_mdp5_plane_state(pstate);

		/*
		 * if any plane on this crtc uses 2 hwpipes, then we need
		 * the crtc to have a right hwmixer.
		 */
		if (pstates[cnt].state->r_hwpipe)
			need_right_mixer = true;
		cnt++;

		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			cursor_plane = true;
	}

	/* bail out early if there aren't any planes */
	if (!cnt)
		return 0;

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

	/*
	 * we need a right hwmixer if the mode's width is greater than a single
	 * LM's max width
	 */
	if (mode->hdisplay > hw_cfg->lm.max_width)
		need_right_mixer = true;

	ret = mdp5_crtc_setup_pipeline(crtc, crtc_state, need_right_mixer);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "couldn't assign mixers %d\n", ret);
		return ret;
	}

	/* assign a stage based on sorted zpos property */
	sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);

	/* trigger a warning if cursor isn't the highest zorder */
	WARN_ON(cursor_plane &&
		(pstates[cnt - 1].plane->type != DRM_PLANE_TYPE_CURSOR));

	start = get_start_stage(crtc, crtc_state, &pstates[0].state->base);

	/* verify that there are not too many planes attached to crtc
	 * and that we don't have conflicting mixer stages:
	 */
	if ((cnt + start - 1) >= hw_cfg->lm.nb_stages) {
		DRM_DEV_ERROR(dev->dev, "too many planes! cnt=%d, start stage=%d\n",
			cnt, start);
		return -EINVAL;
	}

	for (i = 0; i < cnt; i++) {
		if (cursor_plane && (i == (cnt - 1)))
			pstates[i].state->stage = hw_cfg->lm.nb_stages;
		else
			pstates[i].state->stage = start + i;
		DBG("%s: assign pipe %s on stage=%d", crtc->name,
				pstates[i].plane->name,
				pstates[i].state->stage);
	}

	return 0;
}
static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
{
	DBG("%s: begin", crtc->name);
}
static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	DBG("%s: event: %p", crtc->name, crtc->state->event);

	WARN_ON(mdp5_crtc->event);

	spin_lock_irqsave(&dev->event_lock, flags);
	mdp5_crtc->event = crtc->state->event;
	crtc->state->event = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	/*
	 * If no CTL has been allocated in mdp5_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	/* XXX: Can this happen now ? */
	if (unlikely(!mdp5_cstate->ctl))
		return;

	blend_setup(crtc);

	/* PP_DONE irq is only used by command mode for now.
	 * It is better to request pending before FLUSH and START trigger
	 * to make sure no pp_done irq missed.
	 * This is safe because no pp_done will happen before SW trigger
	 * in command mode.
	 */
	if (mdp5_cstate->cmd_mode)
		request_pp_done_pending(crtc);

	mdp5_crtc->flushed_mask = crtc_flush_all(crtc);

	/* XXX are we leaking out state here? */
	mdp5_crtc->vblank.irqmask = mdp5_cstate->vblank_irqmask;
	mdp5_crtc->err.irqmask = mdp5_cstate->err_irqmask;
	mdp5_crtc->pp_done.irqmask = mdp5_cstate->pp_done_irqmask;

	request_pending(crtc, PENDING_FLIP);
}
static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	uint32_t xres = crtc->mode.hdisplay;
	uint32_t yres = crtc->mode.vdisplay;

	/*
	 * Cursor Region Of Interest (ROI) is a plane read from cursor
	 * buffer to render. The ROI region is determined by the visibility of
	 * the cursor point. In the default Cursor image the cursor point will
	 * be at the top left of the cursor image.
	 *
	 * Without rotation:
	 * If the cursor point reaches the right (xres - x < cursor.width) or
	 * bottom (yres - y < cursor.height) boundary of the screen, then ROI
	 * width and ROI height need to be evaluated to crop the cursor image
	 * accordingly.
	 * (xres-x) will be new cursor width when x > (xres - cursor.width)
	 * (yres-y) will be new cursor height when y > (yres - cursor.height)
	 *
	 * With rotation:
	 * We get negative x and/or y coordinates.
	 * (cursor.width - abs(x)) will be new cursor width when x < 0
	 * (cursor.height - abs(y)) will be new cursor height when y < 0
	 */
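	/*
	 * Worked example (illustrative numbers only): on a 1920x1080 mode
	 * with a 64x64 cursor, x = 1900 gives roi_w = min(64, 1920 - 1900) =
	 * 20 (right-edge crop), while x = -16 gives roi_w = 64 - 16 = 48
	 * (left-edge overlap); roi_h behaves the same way in y.
	 */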
	if (mdp5_crtc->cursor.x >= 0)
		*roi_w = min(mdp5_crtc->cursor.width, xres -
			mdp5_crtc->cursor.x);
	else
		*roi_w = mdp5_crtc->cursor.width - abs(mdp5_crtc->cursor.x);
	if (mdp5_crtc->cursor.y >= 0)
		*roi_h = min(mdp5_crtc->cursor.height, yres -
			mdp5_crtc->cursor.y);
	else
		*roi_h = mdp5_crtc->cursor.height - abs(mdp5_crtc->cursor.y);
}
static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc)
{
	const struct drm_format_info *info = drm_format_info(DRM_FORMAT_ARGB8888);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	const enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
	uint32_t blendcfg, stride;
	uint32_t x, y, src_x, src_y, width, height;
	uint32_t roi_w, roi_h;
	int lm;

	assert_spin_locked(&mdp5_crtc->cursor.lock);

	lm = mdp5_cstate->pipeline.mixer->lm;

	x = mdp5_crtc->cursor.x;
	y = mdp5_crtc->cursor.y;
	width = mdp5_crtc->cursor.width;
	height = mdp5_crtc->cursor.height;

	stride = width * info->cpp[0];

	get_roi(crtc, &roi_w, &roi_h);

	/* If cursor buffer overlaps due to rotation on the
	 * upper or left screen border the pixel offset inside
	 * the cursor buffer of the ROI is the positive overlap
	 * distance.
	 */
	if (mdp5_crtc->cursor.x < 0) {
		src_x = abs(mdp5_crtc->cursor.x);
		x = 0;
	} else {
		src_x = 0;
	}
	if (mdp5_crtc->cursor.y < 0) {
		src_y = abs(mdp5_crtc->cursor.y);
		y = 0;
	} else {
		src_y = 0;
	}
	DBG("%s: x=%d, y=%d roi_w=%d roi_h=%d src_x=%d src_y=%d",
			crtc->name, x, y, roi_w, roi_h, src_x, src_y);

	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
			MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_IMG_SIZE(lm),
			MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) |
			MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
			MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
			MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(lm),
			MDP5_LM_CURSOR_START_XY_Y_START(y) |
			MDP5_LM_CURSOR_START_XY_X_START(x));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_XY(lm),
			MDP5_LM_CURSOR_XY_SRC_Y(src_y) |
			MDP5_LM_CURSOR_XY_SRC_X(src_x));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm),
			mdp5_crtc->cursor.iova);

	blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN;
	blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg);
}
static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
		struct drm_file *file, uint32_t handle,
		uint32_t width, uint32_t height)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct drm_device *dev = crtc->dev;
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct platform_device *pdev = mdp5_kms->pdev;
	struct msm_kms *kms = &mdp5_kms->base.base;
	struct drm_gem_object *cursor_bo, *old_bo = NULL;
	struct mdp5_ctl *ctl;
	int ret;
	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
	bool cursor_enable = true;
	unsigned long flags;

	if (!mdp5_crtc->lm_cursor_enabled) {
		dev_warn(dev->dev,
			 "cursor_set is deprecated with cursor planes\n");
		return -EINVAL;
	}

	if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
		DRM_DEV_ERROR(dev->dev, "bad cursor size: %dx%d\n", width, height);
		return -EINVAL;
	}

	ctl = mdp5_cstate->ctl;
	if (!ctl)
		return -EINVAL;

	/* don't support LM cursors when we have source split enabled */
	if (mdp5_cstate->pipeline.r_mixer)
		return -EINVAL;

	if (!handle) {
		DBG("Cursor off");
		cursor_enable = false;
		mdp5_crtc->cursor.iova = 0;
		pm_runtime_get_sync(&pdev->dev);
		goto set_cursor;
	}

	cursor_bo = drm_gem_object_lookup(file, handle);
	if (!cursor_bo)
		return -ENOENT;

	ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace,
			&mdp5_crtc->cursor.iova);
	if (ret)
		return -EINVAL;

	pm_runtime_get_sync(&pdev->dev);

	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
	old_bo = mdp5_crtc->cursor.scanout_bo;

	mdp5_crtc->cursor.scanout_bo = cursor_bo;
	mdp5_crtc->cursor.width = width;
	mdp5_crtc->cursor.height = height;

	mdp5_crtc_restore_cursor(crtc);

	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

set_cursor:
	ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "failed to %sable cursor: %d\n",
				cursor_enable ? "en" : "dis", ret);
		goto end;
	}

	crtc_flush(crtc, flush_mask);

end:
	pm_runtime_put_sync(&pdev->dev);
	if (old_bo) {
		drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
		/* enable vblank to complete cursor work: */
		request_pending(crtc, PENDING_CURSOR);
	}
	return ret;
}
static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
	struct drm_device *dev = crtc->dev;
	uint32_t roi_w;
	uint32_t roi_h;
	unsigned long flags;

	if (!mdp5_crtc->lm_cursor_enabled) {
		dev_warn(dev->dev,
			 "cursor_move is deprecated with cursor planes\n");
		return -EINVAL;
	}

	/* don't support LM cursors when we have source split enabled */
	if (mdp5_cstate->pipeline.r_mixer)
		return -EINVAL;

	/* In case the CRTC is disabled, just drop the cursor update */
	if (unlikely(!crtc->state->enable))
		return 0;

	/* accept negative x/y coordinates up to maximum cursor overlap */
	mdp5_crtc->cursor.x = x = max(x, -(int)mdp5_crtc->cursor.width);
	mdp5_crtc->cursor.y = y = max(y, -(int)mdp5_crtc->cursor.height);

	get_roi(crtc, &roi_w, &roi_h);

	pm_runtime_get_sync(&mdp5_kms->pdev->dev);

	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
	mdp5_crtc_restore_cursor(crtc);
	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

	crtc_flush(crtc, flush_mask);

	pm_runtime_put_sync(&mdp5_kms->pdev->dev);

	return 0;
}
static void
mdp5_crtc_atomic_print_state(struct drm_printer *p,
			     const struct drm_crtc_state *state)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_kms *mdp5_kms = get_kms(state->crtc);

	if (WARN_ON(!pipeline))
		return;

	if (mdp5_cstate->ctl)
		drm_printf(p, "\tctl=%d\n", mdp5_ctl_get_ctl_id(mdp5_cstate->ctl));

	drm_printf(p, "\thwmixer=%s\n", pipeline->mixer ?
			pipeline->mixer->name : "(null)");

	if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT)
		drm_printf(p, "\tright hwmixer=%s\n", pipeline->r_mixer ?
			   pipeline->r_mixer->name : "(null)");

	drm_printf(p, "\tcmd_mode=%d\n", mdp5_cstate->cmd_mode);
}
static struct drm_crtc_state *
mdp5_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate;

	if (WARN_ON(!crtc->state))
		return NULL;

	mdp5_cstate = kmemdup(to_mdp5_crtc_state(crtc->state),
			      sizeof(*mdp5_cstate), GFP_KERNEL);
	if (!mdp5_cstate)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &mdp5_cstate->base);

	return &mdp5_cstate->base;
}
static void mdp5_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(mdp5_cstate);
}
static void mdp5_crtc_reset(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate =
		kzalloc(sizeof(*mdp5_cstate), GFP_KERNEL);

	if (crtc->state)
		mdp5_crtc_destroy_state(crtc, crtc->state);

	__drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base);
}
static const struct drm_crtc_funcs mdp5_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = mdp5_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = mdp5_crtc_reset,
	.atomic_duplicate_state = mdp5_crtc_duplicate_state,
	.atomic_destroy_state = mdp5_crtc_destroy_state,
	.cursor_set = mdp5_crtc_cursor_set,
	.cursor_move = mdp5_crtc_cursor_move,
	.atomic_print_state = mdp5_crtc_atomic_print_state,
	.get_vblank_counter = mdp5_crtc_get_vblank_counter,
	.enable_vblank  = msm_crtc_enable_vblank,
	.disable_vblank = msm_crtc_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
	.mode_set_nofb = mdp5_crtc_mode_set_nofb,
	.atomic_check = mdp5_crtc_atomic_check,
	.atomic_begin = mdp5_crtc_atomic_begin,
	.atomic_flush = mdp5_crtc_atomic_flush,
	.atomic_enable = mdp5_crtc_atomic_enable,
	.atomic_disable = mdp5_crtc_atomic_disable,
	.get_scanout_position = mdp5_crtc_get_scanout_position,
};
static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
	struct drm_crtc *crtc = &mdp5_crtc->base;
	struct msm_drm_private *priv = crtc->dev->dev_private;
	unsigned pending;

	mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);

	pending = atomic_xchg(&mdp5_crtc->pending, 0);

	if (pending & PENDING_FLIP) {
		complete_flip(crtc, NULL);
	}

	if (pending & PENDING_CURSOR)
		drm_flip_work_commit(&mdp5_crtc->unref_cursor_work, priv->wq);
}
static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);

	DBG("%s: error: %08x", mdp5_crtc->base.name, irqstatus);
}
static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc,
								pp_done);

	complete(&mdp5_crtc->pp_completion);
}
static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	int ret;

	ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,
					  msecs_to_jiffies(50));
	if (ret == 0)
		dev_warn_ratelimited(dev->dev, "pp done time out, lm=%d\n",
				     mdp5_cstate->pipeline.mixer->lm);
}
static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	int ret;

	/* Should not call this function if crtc is disabled. */
	if (!ctl)
		return;

	ret = drm_crtc_vblank_get(crtc);
	if (ret)
		return;

	ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
		((mdp5_ctl_get_commit_status(ctl) &
		mdp5_crtc->flushed_mask) == 0),
		msecs_to_jiffies(50));
	if (ret <= 0)
		dev_warn(dev->dev, "vblank time out, crtc=%d\n", mdp5_crtc->id);

	mdp5_crtc->flushed_mask = 0;

	drm_crtc_vblank_put(crtc);
}
uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	return mdp5_crtc->vblank.irqmask;
}
void mdp5_crtc_set_pipeline(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);

	/* should this be done elsewhere ? */
	mdp_irq_update(&mdp5_kms->base);

	mdp5_ctl_set_pipeline(mdp5_cstate->ctl, &mdp5_cstate->pipeline);
}
struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);

	return mdp5_cstate->ctl;
}
struct mdp5_hw_mixer *mdp5_crtc_get_mixer(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate;

	if (WARN_ON(!crtc))
		return ERR_PTR(-EINVAL);

	mdp5_cstate = to_mdp5_crtc_state(crtc->state);

	return WARN_ON(!mdp5_cstate->pipeline.mixer) ?
		ERR_PTR(-EINVAL) : mdp5_cstate->pipeline.mixer;
}
struct mdp5_pipeline *mdp5_crtc_get_pipeline(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate;

	if (WARN_ON(!crtc))
		return ERR_PTR(-EINVAL);

	mdp5_cstate = to_mdp5_crtc_state(crtc->state);

	return &mdp5_cstate->pipeline;
}
void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);

	if (mdp5_cstate->cmd_mode)
		mdp5_crtc_wait_for_pp_done(crtc);
	else
		mdp5_crtc_wait_for_flush_done(crtc);
}
/* initialize crtc */
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
				struct drm_plane *plane,
				struct drm_plane *cursor_plane, int id)
{
	struct drm_crtc *crtc = NULL;
	struct mdp5_crtc *mdp5_crtc;

	mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
	if (!mdp5_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &mdp5_crtc->base;

	mdp5_crtc->id = id;

	spin_lock_init(&mdp5_crtc->lm_lock);
	spin_lock_init(&mdp5_crtc->cursor.lock);
	init_completion(&mdp5_crtc->pp_completion);

	mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
	mdp5_crtc->err.irq = mdp5_crtc_err_irq;
	mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq;

	mdp5_crtc->lm_cursor_enabled = cursor_plane ? false : true;

	drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane,
				  &mdp5_crtc_funcs, NULL);

	drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
			"unref cursor", unref_cursor_worker);

	drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);

	return crtc;
}