/*
 * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include "mdp5_kms.h"

#include <linux/sort.h>
#include <drm/drm_mode.h>
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "drm_flip_work.h"
#define CURSOR_WIDTH	64
#define CURSOR_HEIGHT	64

#define SSPP_MAX	(SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */
struct mdp5_crtc {
	struct drm_crtc base;
	char name[8];
	int id;
	bool enabled;

	/* layer mixer used for this CRTC (+ its lock): */
#define GET_LM_ID(crtc_id)	((crtc_id == 3) ? 5 : crtc_id)
	int lm;
	spinlock_t lm_lock;	/* protect REG_MDP5_LM_* registers */

	/* CTL used for this CRTC: */
	struct mdp5_ctl *ctl;

	/* if there is a pending flip, these will be non-null: */
	struct drm_pending_vblank_event *event;

	/* Bits have been flushed at the last commit,
	 * used to decide if a vsync has happened since last commit.
	 */
	u32 flushed_mask;

#define PENDING_CURSOR 0x1
#define PENDING_FLIP   0x2
	atomic_t pending;

	/* for unref'ing cursor bo's after scanout completes: */
	struct drm_flip_work unref_cursor_work;

	struct mdp_irq vblank;
	struct mdp_irq err;
	struct mdp_irq pp_done;

	struct completion pp_completion;

	bool cmd_mode;

	struct {
		/* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo */
		spinlock_t lock;

		/* current cursor being scanned out: */
		struct drm_gem_object *scanout_bo;
		uint32_t width, height;
		uint32_t x, y;
	} cursor;
};
#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)
static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;
	return to_mdp5_kms(to_mdp_kms(priv->kms));
}
static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	atomic_or(pending, &mdp5_crtc->pending);
	mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
}
static void request_pp_done_pending(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	reinit_completion(&mdp5_crtc->pp_completion);
}
static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	DBG("%s: flush=%08x", mdp5_crtc->name, flush_mask);

	return mdp5_ctl_commit(mdp5_crtc->ctl, flush_mask);
}
/*
 * flush updates, to make sure hw is updated to new scanout fb,
 * so that we can safely queue unref to current fb (ie. next
 * vblank we know hw is done w/ previous scanout_fb).
 */
static u32 crtc_flush_all(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct drm_plane *plane;
	uint32_t flush_mask = 0;

	/* this should not happen: */
	if (WARN_ON(!mdp5_crtc->ctl))
		return 0;

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		flush_mask |= mdp5_plane_get_flush(plane);
	}

	flush_mask |= mdp_ctl_flush_mask_lm(mdp5_crtc->lm);

	return crtc_flush(crtc, flush_mask);
}
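/*
 * Note: the mask built by crtc_flush_all() is the OR of every attached
 * plane's flush bits plus the layer mixer's own bit, so a single
 * mdp5_ctl_commit() call latches the whole new scanout configuration
 * together (see the comment above the function).
 */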
/* if file!=NULL, this is preclose potential cancel-flip path */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_pending_vblank_event *event;
	struct drm_plane *plane;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	event = mdp5_crtc->event;
	if (event) {
		/* if regular vblank case (!file) or if cancel-flip from
		 * preclose on file that requested flip, then send the
		 * event:
		 */
		if (!file || (event->base.file_priv == file)) {
			mdp5_crtc->event = NULL;
			DBG("%s: send event: %p", mdp5_crtc->name, event);
			drm_send_vblank_event(dev, mdp5_crtc->id, event);
		}
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		mdp5_plane_complete_flip(plane);
	}

	if (mdp5_crtc->ctl && !crtc->state->enable) {
		/* set STAGE_UNUSED for all layers */
		mdp5_ctl_blend(mdp5_crtc->ctl, NULL, 0, 0);
		mdp5_crtc->ctl = NULL;
	}
}
static void unref_cursor_worker(struct drm_flip_work *work, void *val)
{
	struct mdp5_crtc *mdp5_crtc =
		container_of(work, struct mdp5_crtc, unref_cursor_work);
	struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);

	msm_gem_put_iova(val, mdp5_kms->id);
	drm_gem_object_unreference_unlocked(val);
}
static void mdp5_crtc_destroy(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	drm_crtc_cleanup(crtc);
	drm_flip_work_cleanup(&mdp5_crtc->unref_cursor_work);

	kfree(mdp5_crtc);
}
static bool mdp5_crtc_mode_fixup(struct drm_crtc *crtc,
		const struct drm_display_mode *mode,
		struct drm_display_mode *adjusted_mode)
{
	return true;
}
/*
 * blend_setup() - blend all the planes of a CRTC
 *
 * If no base layer is available, border will be enabled as the base layer.
 * Otherwise all layers will be blended based on their stage calculated
 * in mdp5_crtc_atomic_check.
 */
static void blend_setup(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_plane *plane;
	const struct mdp5_cfg_hw *hw_cfg;
	struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL};
	const struct mdp_format *format;
	uint32_t lm = mdp5_crtc->lm;
	uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
	unsigned long flags;
	uint8_t stage[STAGE_MAX + 1];
	int i, plane_cnt = 0;
#define blender(stage)	((stage) - STAGE0)

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);

	/* ctl could be released already when we are shutting down: */
	if (!mdp5_crtc->ctl)
		goto out;

	/* Collect all plane information */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		pstate = to_mdp5_plane_state(plane->state);
		pstates[pstate->stage] = pstate;
		stage[pstate->stage] = mdp5_plane_pipe(plane);
		plane_cnt++;
	}

	/*
	 * If there is no base layer, enable border color.
	 * Although it's not possible in current blend logic,
	 * put it here as a reminder.
	 */
	if (!pstates[STAGE_BASE] && plane_cnt) {
		ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT;
		DBG("Border Color is enabled");
	}

	/* The reset for blending */
	for (i = STAGE0; i <= STAGE_MAX; i++) {
		if (!pstates[i])
			continue;

		format = to_mdp_format(
			msm_framebuffer_format(pstates[i]->base.fb));
		plane = pstates[i]->base.plane;
		blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
			MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST);
		fg_alpha = pstates[i]->alpha;
		bg_alpha = 0xFF - pstates[i]->alpha;
		DBG("Stage %d fg_alpha %x bg_alpha %x", i, fg_alpha, bg_alpha);

		if (format->alpha_enable && pstates[i]->premultiplied) {
			blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
			if (fg_alpha != 0xff) {
				bg_alpha = fg_alpha;
				blend_op |=
					MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
					MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
			} else {
				blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
			}
		} else if (format->alpha_enable) {
			blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_PIXEL) |
				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
			if (fg_alpha != 0xff) {
				bg_alpha = fg_alpha;
				blend_op |=
					MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA |
					MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA |
					MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
					MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
			} else {
				blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
			}
		}

		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(lm,
				blender(i)), blend_op);
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
				blender(i)), fg_alpha);
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
				blender(i)), bg_alpha);
	}

	mdp5_ctl_blend(mdp5_crtc->ctl, stage, plane_cnt, ctl_blend_flags);

out:
	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}
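/*
 * Summary of the blend_op selection above (no additional behaviour):
 * opaque formats use the constant fg/bg alpha pair derived from the
 * plane's alpha property; formats with an alpha channel weight the
 * foreground by the pixel alpha (FG_PIXEL), and premultiplied formats
 * keep a constant foreground since the pixels already carry their
 * alpha.  A plane alpha other than 0xff is additionally applied through
 * the *_MOD_ALPHA bits, otherwise the background simply uses the
 * inverse of the foreground alpha (BG_INV_ALPHA).
 */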
static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	unsigned long flags;
	struct drm_display_mode *mode;

	if (WARN_ON(!crtc->state))
		return;

	mode = &crtc->state->adjusted_mode;

	DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
			mdp5_crtc->name, mode->base.id, mode->name,
			mode->vrefresh, mode->clock,
			mode->hdisplay, mode->hsync_start,
			mode->hsync_end, mode->htotal,
			mode->vdisplay, mode->vsync_start,
			mode->vsync_end, mode->vtotal,
			mode->type, mode->flags);

	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
	mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->lm),
			MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) |
			MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}
static void mdp5_crtc_disable(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);

	DBG("%s", mdp5_crtc->name);

	if (WARN_ON(!mdp5_crtc->enabled))
		return;

	if (mdp5_crtc->cmd_mode)
		mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done);

	mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
	mdp5_disable(mdp5_kms);

	mdp5_crtc->enabled = false;
}
static void mdp5_crtc_enable(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);

	DBG("%s", mdp5_crtc->name);

	if (WARN_ON(mdp5_crtc->enabled))
		return;

	mdp5_enable(mdp5_kms);
	mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);

	if (mdp5_crtc->cmd_mode)
		mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->pp_done);

	mdp5_crtc->enabled = true;
}
struct plane_state {
	struct drm_plane *plane;
	struct mdp5_plane_state *state;
};

static int pstate_cmp(const void *a, const void *b)
{
	struct plane_state *pa = (struct plane_state *)a;
	struct plane_state *pb = (struct plane_state *)b;
	return pa->state->zpos - pb->state->zpos;
}
static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_plane *plane;
	struct drm_device *dev = crtc->dev;
	struct plane_state pstates[STAGE_MAX + 1];
	const struct mdp5_cfg_hw *hw_cfg;
	int cnt = 0, i;

	DBG("%s: check", mdp5_crtc->name);

	/* verify that there are not too many planes attached to crtc
	 * and that we don't have conflicting mixer stages:
	 */
	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
	drm_atomic_crtc_state_for_each_plane(plane, state) {
		struct drm_plane_state *pstate;

		if (cnt >= (hw_cfg->lm.nb_stages)) {
			dev_err(dev->dev, "too many planes!\n");
			return -EINVAL;
		}

		pstate = state->state->plane_states[drm_plane_index(plane)];

		/* plane might not have changed, in which case take
		 * current state:
		 */
		if (!pstate)
			pstate = plane->state;
		pstates[cnt].plane = plane;
		pstates[cnt].state = to_mdp5_plane_state(pstate);

		cnt++;
	}

	/* assign a stage based on sorted zpos property */
	sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);

	for (i = 0; i < cnt; i++) {
		pstates[i].state->stage = STAGE_BASE + i;
		DBG("%s: assign pipe %s on stage=%d", mdp5_crtc->name,
				pipe2name(mdp5_plane_pipe(pstates[i].plane)),
				pstates[i].state->stage);
	}

	return 0;
}
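/*
 * Note: after the sort() above pstates[] is ordered by ascending zpos,
 * so pstates[0] becomes STAGE_BASE and each following plane is stacked
 * one stage higher; blend_setup() then blends them bottom-up in that
 * order.
 */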
static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc,
		struct drm_crtc_state *old_crtc_state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	DBG("%s: begin", mdp5_crtc->name);
}
static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
		struct drm_crtc_state *old_crtc_state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	DBG("%s: event: %p", mdp5_crtc->name, crtc->state->event);

	WARN_ON(mdp5_crtc->event);

	spin_lock_irqsave(&dev->event_lock, flags);
	mdp5_crtc->event = crtc->state->event;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	/*
	 * If no CTL has been allocated in mdp5_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!mdp5_crtc->ctl))
		return;

	blend_setup(crtc);

	/* PP_DONE irq is only used by command mode for now.
	 * It is better to request pending before FLUSH and START trigger
	 * to make sure no pp_done irq missed.
	 * This is safe because no pp_done will happen before SW trigger
	 * in command mode.
	 */
	if (mdp5_crtc->cmd_mode)
		request_pp_done_pending(crtc);

	mdp5_crtc->flushed_mask = crtc_flush_all(crtc);

	request_pending(crtc, PENDING_FLIP);
}
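/*
 * The event stashed in mdp5_crtc->event above is completed from
 * mdp5_crtc_vblank_irq() -> complete_flip() once the PENDING_FLIP bit
 * requested here is seen at the following vblank.
 */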
static int mdp5_crtc_set_property(struct drm_crtc *crtc,
		struct drm_property *property, uint64_t val)
{
	/* no driver-specific CRTC properties are supported (yet) */
	return -EINVAL;
}
static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	uint32_t xres = crtc->mode.hdisplay;
	uint32_t yres = crtc->mode.vdisplay;

	/*
	 * Cursor Region Of Interest (ROI) is a plane read from cursor
	 * buffer to render. The ROI region is determined by the visibility of
	 * the cursor point. In the default Cursor image the cursor point will
	 * be at the top left of the cursor image, unless it is specified
	 * otherwise using hotspot feature.
	 *
	 * If the cursor point reaches the right (xres - x < cursor.width) or
	 * bottom (yres - y < cursor.height) boundary of the screen, then ROI
	 * width and ROI height need to be evaluated to crop the cursor image
	 * accordingly.
	 * (xres-x) will be new cursor width when x > (xres - cursor.width)
	 * (yres-y) will be new cursor height when y > (yres - cursor.height)
	 */
	*roi_w = min(mdp5_crtc->cursor.width, xres -
			mdp5_crtc->cursor.x);
	*roi_h = min(mdp5_crtc->cursor.height, yres -
			mdp5_crtc->cursor.y);
}
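/*
 * Example (illustrative only): with a 64x64 cursor on a 1920-wide mode,
 * a cursor.x of 1900 leaves xres - x = 20 visible columns, so get_roi()
 * returns roi_w = 20 while roi_h stays 64.
 */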
static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
		struct drm_file *file, uint32_t handle,
		uint32_t width, uint32_t height)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_gem_object *cursor_bo, *old_bo = NULL;
	uint32_t blendcfg, cursor_addr, stride;
	int ret, lm, bpp;
	unsigned int depth;
	enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
	uint32_t roi_w, roi_h;
	bool cursor_enable = true;
	unsigned long flags;

	if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
		dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
		return -EINVAL;
	}

	if (NULL == mdp5_crtc->ctl)
		return -EINVAL;

	if (!handle) {
		DBG("Cursor off");
		cursor_enable = false;
		goto set_cursor;
	}

	cursor_bo = drm_gem_object_lookup(dev, file, handle);
	if (!cursor_bo)
		return -ENOENT;

	ret = msm_gem_get_iova(cursor_bo, mdp5_kms->id, &cursor_addr);
	if (ret)
		return -EINVAL;

	lm = mdp5_crtc->lm;
	drm_fb_get_bpp_depth(DRM_FORMAT_ARGB8888, &depth, &bpp);
	stride = width * (bpp >> 3);

	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
	old_bo = mdp5_crtc->cursor.scanout_bo;

	mdp5_crtc->cursor.scanout_bo = cursor_bo;
	mdp5_crtc->cursor.width = width;
	mdp5_crtc->cursor.height = height;

	get_roi(crtc, &roi_w, &roi_h);

	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
			MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_IMG_SIZE(lm),
			MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) |
			MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
			MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
			MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm), cursor_addr);

	blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN;
	blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg);

	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

set_cursor:
	ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, 0, cursor_enable);
	if (ret) {
		dev_err(dev->dev, "failed to %sable cursor: %d\n",
				cursor_enable ? "en" : "dis", ret);
		goto end;
	}

	crtc_flush(crtc, flush_mask);

end:
	if (old_bo) {
		drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
		/* enable vblank to complete cursor work: */
		request_pending(crtc, PENDING_CURSOR);
	}
	return ret;
}
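/*
 * The previous cursor BO is not released immediately since the hardware
 * may still be scanning it out; it is queued on unref_cursor_work and
 * dropped in unref_cursor_worker() after the PENDING_CURSOR vblank.
 */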
static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
	uint32_t roi_w;
	uint32_t roi_h;
	unsigned long flags;

	/* In case the CRTC is disabled, just drop the cursor update */
	if (unlikely(!crtc->state->enable))
		return 0;

	mdp5_crtc->cursor.x = x = max(x, 0);
	mdp5_crtc->cursor.y = y = max(y, 0);

	get_roi(crtc, &roi_w, &roi_h);

	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(mdp5_crtc->lm),
			MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
			MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(mdp5_crtc->lm),
			MDP5_LM_CURSOR_START_XY_Y_START(y) |
			MDP5_LM_CURSOR_START_XY_X_START(x));
	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

	crtc_flush(crtc, flush_mask);

	return 0;
}
static const struct drm_crtc_funcs mdp5_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = mdp5_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.set_property = mdp5_crtc_set_property,
	.reset = drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
	.cursor_set = mdp5_crtc_cursor_set,
	.cursor_move = mdp5_crtc_cursor_move,
};
static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
	.mode_fixup = mdp5_crtc_mode_fixup,
	.mode_set_nofb = mdp5_crtc_mode_set_nofb,
	.disable = mdp5_crtc_disable,
	.enable = mdp5_crtc_enable,
	.atomic_check = mdp5_crtc_atomic_check,
	.atomic_begin = mdp5_crtc_atomic_begin,
	.atomic_flush = mdp5_crtc_atomic_flush,
};
static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
	struct drm_crtc *crtc = &mdp5_crtc->base;
	struct msm_drm_private *priv = crtc->dev->dev_private;
	unsigned pending;

	mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);

	pending = atomic_xchg(&mdp5_crtc->pending, 0);

	if (pending & PENDING_FLIP) {
		complete_flip(crtc, NULL);
	}

	if (pending & PENDING_CURSOR)
		drm_flip_work_commit(&mdp5_crtc->unref_cursor_work, priv->wq);
}
static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);

	DBG("%s: error: %08x", mdp5_crtc->name, irqstatus);
}
static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc,
								pp_done);

	complete(&mdp5_crtc->pp_completion);
}
static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	int ret;

	ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,
						msecs_to_jiffies(50));
	if (ret == 0)
		dev_warn(dev->dev, "pp done time out, lm=%d\n", mdp5_crtc->lm);
}
static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	int ret;

	/* Should not call this function if crtc is disabled. */
	if (!mdp5_crtc->ctl)
		return;

	ret = drm_crtc_vblank_get(crtc);
	if (ret)
		return;

	ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
		((mdp5_ctl_get_commit_status(mdp5_crtc->ctl) &
		mdp5_crtc->flushed_mask) == 0),
		msecs_to_jiffies(50));
	if (ret <= 0)
		dev_warn(dev->dev, "vblank time out, crtc=%d\n", mdp5_crtc->id);

	mdp5_crtc->flushed_mask = 0;

	drm_crtc_vblank_put(crtc);
}
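/*
 * flushed_mask holds the bits passed to mdp5_ctl_commit() during the
 * last atomic flush; the wait above polls the CTL commit status until
 * the hardware clears those bits (or 50ms pass), which is how flush
 * completion is detected on non-command-mode interfaces.
 */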
uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	return mdp5_crtc->vblank.irqmask;
}
void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	DBG("cancel: %p", file);
	complete_flip(crtc, file);
}
void mdp5_crtc_set_pipeline(struct drm_crtc *crtc,
		struct mdp5_interface *intf, struct mdp5_ctl *ctl)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	int lm = mdp5_crtc_get_lm(crtc);

	/* now that we know what irq's we want: */
	mdp5_crtc->err.irqmask = intf2err(intf->num);
	mdp5_crtc->vblank.irqmask = intf2vblank(lm, intf);

	if ((intf->type == INTF_DSI) &&
		(intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) {
		mdp5_crtc->pp_done.irqmask = lm2ppdone(lm);
		mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq;
		mdp5_crtc->cmd_mode = true;
	} else {
		mdp5_crtc->pp_done.irqmask = 0;
		mdp5_crtc->pp_done.irq = NULL;
		mdp5_crtc->cmd_mode = false;
	}

	mdp_irq_update(&mdp5_kms->base);

	mdp5_crtc->ctl = ctl;
	mdp5_ctl_set_pipeline(ctl, intf, lm);
}
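/*
 * Only DSI interfaces in command mode get a ping-pong done (PP_DONE)
 * irq hooked up here; cmd_mode later selects between
 * mdp5_crtc_wait_for_pp_done() and mdp5_crtc_wait_for_flush_done() in
 * mdp5_crtc_wait_for_commit_done().
 */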
int mdp5_crtc_get_lm(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	return WARN_ON(!crtc) ? -EINVAL : mdp5_crtc->lm;
}
void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	if (mdp5_crtc->cmd_mode)
		mdp5_crtc_wait_for_pp_done(crtc);
	else
		mdp5_crtc_wait_for_flush_done(crtc);
}
/* initialize crtc */
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
		struct drm_plane *plane, int id)
{
	struct drm_crtc *crtc = NULL;
	struct mdp5_crtc *mdp5_crtc;

	mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
	if (!mdp5_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &mdp5_crtc->base;

	mdp5_crtc->id = id;
	mdp5_crtc->lm = GET_LM_ID(id);

	spin_lock_init(&mdp5_crtc->lm_lock);
	spin_lock_init(&mdp5_crtc->cursor.lock);
	init_completion(&mdp5_crtc->pp_completion);

	mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
	mdp5_crtc->err.irq = mdp5_crtc_err_irq;

	snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d",
			pipe2name(mdp5_plane_pipe(plane)), id);

	drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs);

	drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
			"unref cursor", unref_cursor_worker);
	drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
	plane->crtc = crtc;

	return crtc;
}