/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "mdp5_kms.h"

#include <drm/drm_mode.h>
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "drm_flip_work.h"

struct mdp5_crtc {
	struct drm_crtc base;
	char name[8];
	struct drm_plane *plane;
	struct drm_plane *planes[8];
	int id;
	bool enabled;

	/* which mixer/encoder we route output to: */
	int mixer;

	/* if there is a pending flip, these will be non-null: */
	struct drm_pending_vblank_event *event;
	struct msm_fence_cb pageflip_cb;

#define PENDING_CURSOR 0x1
#define PENDING_FLIP   0x2
	atomic_t pending;

	/* the fb that we logically (from PoV of KMS API) hold a ref
	 * to.  Which we may not yet be scanning out (we may still
	 * be scanning out previous in case of page_flip while waiting
	 * for gpu rendering to complete:
	 */
	struct drm_framebuffer *fb;

	/* the fb that we currently hold a scanout ref to: */
	struct drm_framebuffer *scanout_fb;

	/* for unref'ing framebuffers after scanout completes: */
	struct drm_flip_work unref_fb_work;

	struct mdp_irq vblank;
	struct mdp_irq err;
};
#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)

static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;
	return to_mdp5_kms(to_mdp_kms(priv->kms));
}

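/* Latch the given PENDING_* flag(s) and arm the vblank irq so the
 * pending work can be completed from the next vblank.
 */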
static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	atomic_or(pending, &mdp5_crtc->pending);
	mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
}

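/* Build the CTL flush bitmask from every pipe currently attached to this
 * crtc, plus the mixer and the CTL itself, then kick the double-buffered
 * register update via CTL_FLUSH.
 */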
static void crtc_flush(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	int id = mdp5_crtc->id;
	uint32_t i, flush = 0;

	for (i = 0; i < ARRAY_SIZE(mdp5_crtc->planes); i++) {
		struct drm_plane *plane = mdp5_crtc->planes[i];
		if (plane) {
			enum mdp5_pipe pipe = mdp5_plane_pipe(plane);
			flush |= pipe2flush(pipe);
		}
	}
	flush |= mixer2flush(mdp5_crtc->id);
	flush |= MDP5_CTL_FLUSH_CTL;

	DBG("%s: flush=%08x", mdp5_crtc->name, flush);

	mdp5_write(mdp5_kms, REG_MDP5_CTL_FLUSH(id), flush);
}

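/* Point the KMS-visible fb at new_fb, taking a reference on it and
 * queueing the previously referenced fb for deferred unref.
 */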
static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct drm_framebuffer *old_fb = mdp5_crtc->fb;

	/* grab reference to incoming scanout fb: */
	drm_framebuffer_reference(new_fb);
	mdp5_crtc->base.primary->fb = new_fb;
	mdp5_crtc->fb = new_fb;

	if (old_fb)
		drm_flip_work_queue(&mdp5_crtc->unref_fb_work, old_fb);
}

/* unlike update_fb(), take a ref to the new scanout fb *before* updating
 * plane, then call this.  Needed to ensure we don't unref the buffer that
 * is actually still being scanned out.
 *
 * Note that this whole thing goes away with atomic.. since we can defer
 * calling into driver until rendering is done.
 */
static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	/* flush updates, to make sure hw is updated to new scanout fb,
	 * so that we can safely queue unref to current fb (ie. next
	 * vblank we know hw is done w/ previous scanout_fb).
	 */
	crtc_flush(crtc);

	if (mdp5_crtc->scanout_fb)
		drm_flip_work_queue(&mdp5_crtc->unref_fb_work,
				mdp5_crtc->scanout_fb);

	mdp5_crtc->scanout_fb = fb;

	/* enable vblank to complete flip: */
	request_pending(crtc, PENDING_FLIP);
}

/* if file!=NULL, this is preclose potential cancel-flip path */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_pending_vblank_event *event;
	unsigned long flags, i;

	spin_lock_irqsave(&dev->event_lock, flags);
	event = mdp5_crtc->event;
	if (event) {
		/* if regular vblank case (!file) or if cancel-flip from
		 * preclose on file that requested flip, then send the
		 * event:
		 */
		if (!file || (event->base.file_priv == file)) {
			mdp5_crtc->event = NULL;
			drm_send_vblank_event(dev, mdp5_crtc->id, event);
		}
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);

	for (i = 0; i < ARRAY_SIZE(mdp5_crtc->planes); i++) {
		struct drm_plane *plane = mdp5_crtc->planes[i];
		if (plane)
			mdp5_plane_complete_flip(plane);
	}
}

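/* Fence callback: gpu rendering to the new fb has completed, so it is
 * now safe to push it to the plane and start scanning it out.
 */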
static void pageflip_cb(struct msm_fence_cb *cb)
{
	struct mdp5_crtc *mdp5_crtc =
		container_of(cb, struct mdp5_crtc, pageflip_cb);
	struct drm_crtc *crtc = &mdp5_crtc->base;
	struct drm_framebuffer *fb = mdp5_crtc->fb;

	if (!fb)
		return;

	drm_framebuffer_reference(fb);
	mdp5_plane_set_scanout(mdp5_crtc->plane, fb);
	update_scanout(crtc, fb);
}

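/* flip_work callback, runs from the workqueue: drops the fb reference
 * that was queued once scanout had moved on.
 */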
static void unref_fb_worker(struct drm_flip_work *work, void *val)
{
	struct mdp5_crtc *mdp5_crtc =
		container_of(work, struct mdp5_crtc, unref_fb_work);
	struct drm_device *dev = mdp5_crtc->base.dev;

	mutex_lock(&dev->mode_config.mutex);
	drm_framebuffer_unreference(val);
	mutex_unlock(&dev->mode_config.mutex);
}

static void mdp5_crtc_destroy(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	drm_crtc_cleanup(crtc);
	drm_flip_work_cleanup(&mdp5_crtc->unref_fb_work);

	kfree(mdp5_crtc);
}

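/* DPMS: enable/disable the mdp clocks and (un)register the error irq
 * when the crtc is switched on or off.
 */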
static void mdp5_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	bool enabled = (mode == DRM_MODE_DPMS_ON);

	DBG("%s: mode=%d", mdp5_crtc->name, mode);

	if (enabled != mdp5_crtc->enabled) {
		if (enabled) {
			mdp5_enable(mdp5_kms);
			mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
		} else {
			mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
			mdp5_disable(mdp5_kms);
		}
		mdp5_crtc->enabled = enabled;
	}
}

static bool mdp5_crtc_mode_fixup(struct drm_crtc *crtc,
		const struct drm_display_mode *mode,
		struct drm_display_mode *adjusted_mode)
{
	return true;
}

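/* Program the layer-mixer blend state and CTL layer mapping for this crtc. */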
static void blend_setup(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	int id = mdp5_crtc->id;

	/*
	 * Hard-coded setup for now until I figure out how the
	 * layer-mixer works.
	 */
	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(id),
			MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA);
	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(id, 0),
			MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
			MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL) |
			MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA);
	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(id, 0), 0xff);
	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(id, 0), 0x00);

	/* NOTE: seems that LM[n] and CTL[m], we do not need n==m.. but
	 * we want to be setting CTL[m].LAYER[n].  Not sure what the
	 * point of having CTL[m].LAYER[o] (for o!=n).. maybe that is
	 * used when chaining up mixers for high resolution displays?
	 */
	mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 0),
			MDP5_CTL_LAYER_REG_RGB0(STAGE0) |
			MDP5_CTL_LAYER_REG_BORDER_COLOR);
	mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 1), 0);
	mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 2), 0);
	mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 3), 0);
	mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 4), 0);
}

static int mdp5_crtc_mode_set(struct drm_crtc *crtc,
		struct drm_display_mode *mode,
		struct drm_display_mode *adjusted_mode,
		int x, int y,
		struct drm_framebuffer *old_fb)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	int ret;

	mode = adjusted_mode;

	DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
			mdp5_crtc->name, mode->base.id, mode->name,
			mode->vrefresh, mode->clock,
			mode->hdisplay, mode->hsync_start,
			mode->hsync_end, mode->htotal,
			mode->vdisplay, mode->vsync_start,
			mode->vsync_end, mode->vtotal,
			mode->type, mode->flags);

	/* grab extra ref for update_scanout() */
	drm_framebuffer_reference(crtc->primary->fb);

	ret = mdp5_plane_mode_set(mdp5_crtc->plane, crtc, crtc->primary->fb,
			0, 0, mode->hdisplay, mode->vdisplay,
			x << 16, y << 16,
			mode->hdisplay << 16, mode->vdisplay << 16);
	if (ret) {
		drm_framebuffer_unreference(crtc->primary->fb);
		dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
				mdp5_crtc->name, ret);
		return ret;
	}

	mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->id),
			MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) |
			MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));

	update_fb(crtc, crtc->primary->fb);
	update_scanout(crtc, crtc->primary->fb);

	return 0;
}

static void mdp5_crtc_prepare(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	DBG("%s", mdp5_crtc->name);
	/* make sure we hold a ref to mdp clks while setting up mode: */
	mdp5_enable(get_kms(crtc));
	mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}

static void mdp5_crtc_commit(struct drm_crtc *crtc)
{
	mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
	crtc_flush(crtc);
	/* drop the ref to mdp clk's that we got in prepare: */
	mdp5_disable(get_kms(crtc));
}

static int mdp5_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
		struct drm_framebuffer *old_fb)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct drm_plane *plane = mdp5_crtc->plane;
	struct drm_display_mode *mode = &crtc->mode;
	int ret;

	/* grab extra ref for update_scanout() */
	drm_framebuffer_reference(crtc->primary->fb);

	ret = mdp5_plane_mode_set(plane, crtc, crtc->primary->fb,
			0, 0, mode->hdisplay, mode->vdisplay,
			x << 16, y << 16,
			mode->hdisplay << 16, mode->vdisplay << 16);
	if (ret) {
		drm_framebuffer_unreference(crtc->primary->fb);
		return ret;
	}

	update_fb(crtc, crtc->primary->fb);
	update_scanout(crtc, crtc->primary->fb);

	return 0;
}

static void mdp5_crtc_load_lut(struct drm_crtc *crtc)
{
}

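/* Page flip: stash the completion event, switch the KMS fb, and defer the
 * actual scanout update to pageflip_cb() once gpu rendering completes.
 */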
static int mdp5_crtc_page_flip(struct drm_crtc *crtc,
		struct drm_framebuffer *new_fb,
		struct drm_pending_vblank_event *event,
		uint32_t page_flip_flags)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_gem_object *obj;
	unsigned long flags;

	if (mdp5_crtc->event) {
		dev_err(dev->dev, "already pending flip!\n");
		return -EBUSY;
	}

	obj = msm_framebuffer_bo(new_fb, 0);

	spin_lock_irqsave(&dev->event_lock, flags);
	mdp5_crtc->event = event;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	update_fb(crtc, new_fb);

	return msm_gem_queue_inactive_cb(obj, &mdp5_crtc->pageflip_cb);
}

static int mdp5_crtc_set_property(struct drm_crtc *crtc,
		struct drm_property *property, uint64_t val)
{
	return -EINVAL;
}

static const struct drm_crtc_funcs mdp5_crtc_funcs = {
	.set_config = drm_crtc_helper_set_config,
	.destroy = mdp5_crtc_destroy,
	.page_flip = mdp5_crtc_page_flip,
	.set_property = mdp5_crtc_set_property,
};

static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
	.dpms = mdp5_crtc_dpms,
	.mode_fixup = mdp5_crtc_mode_fixup,
	.mode_set = mdp5_crtc_mode_set,
	.prepare = mdp5_crtc_prepare,
	.commit = mdp5_crtc_commit,
	.mode_set_base = mdp5_crtc_mode_set_base,
	.load_lut = mdp5_crtc_load_lut,
};

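/* vblank irq handler: picks up whatever was latched by request_pending()
 * and completes pending flips / deferred fb unrefs.
 */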
static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
	struct drm_crtc *crtc = &mdp5_crtc->base;
	struct msm_drm_private *priv = crtc->dev->dev_private;
	unsigned pending;

	mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);

	pending = atomic_xchg(&mdp5_crtc->pending, 0);

	if (pending & PENDING_FLIP) {
		complete_flip(crtc, NULL);
		drm_flip_work_commit(&mdp5_crtc->unref_fb_work, priv->wq);
	}
}

static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);
	struct drm_crtc *crtc = &mdp5_crtc->base;
	DBG("%s: error: %08x", mdp5_crtc->name, irqstatus);
	crtc_flush(crtc);
}

uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	return mdp5_crtc->vblank.irqmask;
}

void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	DBG("cancel: %p", file);
	complete_flip(crtc, file);
}

/* set interface for routing crtc->encoder: */
void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
		enum mdp5_intf intf_id)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	static const enum mdp5_intfnum intfnum[] = {
			INTF0, INTF1, INTF2, INTF3,
	};
	uint32_t intf_sel;

	/* now that we know what irq's we want: */
	mdp5_crtc->err.irqmask = intf2err(intf);
	mdp5_crtc->vblank.irqmask = intf2vblank(intf);

	/* when called from modeset_init(), skip the rest until later: */
	if (!mdp5_kms)
		return;

	intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);

	switch (intf) {
	case 0:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf_id);
		break;
	case 1:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf_id);
		break;
	case 2:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf_id);
		break;
	case 3:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf_id);
		break;
	default:
		BUG();
	}

	DBG("%s: intf_sel=%08x", mdp5_crtc->name, intf_sel);

	mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
	mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(mdp5_crtc->id),
			MDP5_CTL_OP_MODE(MODE_NONE) |
			MDP5_CTL_OP_INTF_NUM(intfnum[intf]));

	crtc_flush(crtc);
}

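/* Track which plane is attached to which pipe slot, updating the blend
 * setup (and flushing if needed) when the mapping changes.
 */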
static void set_attach(struct drm_crtc *crtc, enum mdp5_pipe pipe_id,
		struct drm_plane *plane)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	BUG_ON(pipe_id >= ARRAY_SIZE(mdp5_crtc->planes));

	if (mdp5_crtc->planes[pipe_id] == plane)
		return;

	mdp5_crtc->planes[pipe_id] = plane;
	blend_setup(crtc);
	if (mdp5_crtc->enabled && (plane != mdp5_crtc->plane))
		crtc_flush(crtc);
}

void mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
{
	set_attach(crtc, mdp5_plane_pipe(plane), plane);
}

void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane)
{
	/* don't actually detach our primary plane: */
	if (to_mdp5_crtc(crtc)->plane == plane)
		return;
	set_attach(crtc, mdp5_plane_pipe(plane), NULL);
}

/* initialize crtc */
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
		struct drm_plane *plane, int id)
{
	struct drm_crtc *crtc = NULL;
	struct mdp5_crtc *mdp5_crtc;
	int ret;

	mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
	if (!mdp5_crtc) {
		ret = -ENOMEM;
		goto fail;
	}

	crtc = &mdp5_crtc->base;

	mdp5_crtc->plane = plane;
	mdp5_crtc->id = id;

	mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
	mdp5_crtc->err.irq = mdp5_crtc_err_irq;

	snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d",
			pipe2name(mdp5_plane_pipe(plane)), id);

	ret = drm_flip_work_init(&mdp5_crtc->unref_fb_work, 16,
			"unref fb", unref_fb_worker);
	if (ret)
		goto fail;

	INIT_FENCE_CB(&mdp5_crtc->pageflip_cb, pageflip_cb);

	drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs);
	drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);

	mdp5_plane_install_properties(mdp5_crtc->plane, &crtc->base);

	return crtc;

fail:
	if (crtc)
		mdp5_crtc_destroy(crtc);

	return ERR_PTR(ret);
}