/*
 * Copyright (C) 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/**
 * DOC: VC4 KMS
 *
 * This is the general code for implementing KMS mode setting that
 * doesn't clearly associate with any of the other objects (plane,
 * crtc, HDMI encoder).
 */
18 #include "drm_atomic.h"
19 #include "drm_atomic_helper.h"
20 #include "drm_crtc_helper.h"
21 #include "drm_plane_helper.h"
22 #include "drm_fb_cma_helper.h"
/* Called by the DRM core when the output configuration may have changed
 * (hotplug polling); forwards the event to the CMA fbdev emulation layer.
 */
static void vc4_output_poll_changed(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_fbdev_cma_hotplug_event(vc4->fbdev);
}
/* Tracking structure for one in-flight atomic commit, so that completion
 * can be deferred until the GPU seqno callback fires (nonblocking commits).
 */
struct vc4_commit {
	struct drm_device *dev;		/* device this commit applies to */
	struct drm_atomic_state *state;	/* swapped-in state being committed */
	struct vc4_seqno_cb cb;		/* seqno callback that triggers completion */
};
39 vc4_atomic_complete_commit(struct vc4_commit
*c
)
41 struct drm_atomic_state
*state
= c
->state
;
42 struct drm_device
*dev
= state
->dev
;
43 struct vc4_dev
*vc4
= to_vc4_dev(dev
);
45 drm_atomic_helper_commit_modeset_disables(dev
, state
);
47 drm_atomic_helper_commit_planes(dev
, state
, 0);
49 drm_atomic_helper_commit_modeset_enables(dev
, state
);
51 /* Make sure that drm_atomic_helper_wait_for_vblanks()
52 * actually waits for vblank. If we're doing a full atomic
53 * modeset (as opposed to a vc4_update_plane() short circuit),
54 * then we need to wait for scanout to be done with our
55 * display lists before we free it and potentially reallocate
56 * and overwrite the dlist memory with a new modeset.
58 state
->legacy_cursor_update
= false;
60 drm_atomic_helper_wait_for_vblanks(dev
, state
);
62 drm_atomic_helper_cleanup_planes(dev
, state
);
64 drm_atomic_state_put(state
);
66 up(&vc4
->async_modeset
);
/* Seqno-callback trampoline: recover the enclosing vc4_commit from the
 * embedded callback member and finish the commit.
 */
static void
vc4_atomic_complete_commit_seqno_cb(struct vc4_seqno_cb *cb)
{
	struct vc4_commit *c = container_of(cb, struct vc4_commit, cb);

	vc4_atomic_complete_commit(c);
}
79 static struct vc4_commit
*commit_init(struct drm_atomic_state
*state
)
81 struct vc4_commit
*c
= kzalloc(sizeof(*c
), GFP_KERNEL
);
92 * vc4_atomic_commit - commit validated state object
94 * @state: the driver state object
95 * @nonblock: nonblocking commit
97 * This function commits a with drm_atomic_helper_check() pre-validated state
98 * object. This can still fail when e.g. the framebuffer reservation fails. For
99 * now this doesn't implement asynchronous commits.
102 * Zero for success or -errno.
104 static int vc4_atomic_commit(struct drm_device
*dev
,
105 struct drm_atomic_state
*state
,
108 struct vc4_dev
*vc4
= to_vc4_dev(dev
);
111 uint64_t wait_seqno
= 0;
112 struct vc4_commit
*c
;
113 struct drm_plane
*plane
;
114 struct drm_plane_state
*new_state
;
116 c
= commit_init(state
);
120 /* Make sure that any outstanding modesets have finished. */
122 struct drm_crtc
*crtc
;
123 struct drm_crtc_state
*crtc_state
;
128 * If there's an undispatched event to send then we're
129 * obviously still busy. If there isn't, then we can
130 * unconditionally wait for the semaphore because it
131 * shouldn't be contended (for long).
133 * This is to prevent a race where queuing a new flip
134 * from userspace immediately on receipt of an event
135 * beats our clean-up and returns EBUSY.
137 spin_lock_irqsave(&dev
->event_lock
, flags
);
138 for_each_crtc_in_state(state
, crtc
, crtc_state
, i
)
139 busy
|= vc4_event_pending(crtc
);
140 spin_unlock_irqrestore(&dev
->event_lock
, flags
);
146 ret
= down_interruptible(&vc4
->async_modeset
);
152 ret
= drm_atomic_helper_prepare_planes(dev
, state
);
155 up(&vc4
->async_modeset
);
159 for_each_plane_in_state(state
, plane
, new_state
, i
) {
160 if ((plane
->state
->fb
!= new_state
->fb
) && new_state
->fb
) {
161 struct drm_gem_cma_object
*cma_bo
=
162 drm_fb_cma_get_gem_obj(new_state
->fb
, 0);
163 struct vc4_bo
*bo
= to_vc4_bo(&cma_bo
->base
);
165 wait_seqno
= max(bo
->seqno
, wait_seqno
);
170 * This is the point of no return - everything below never fails except
171 * when the hw goes bonghits. Which means we can commit the new state on
172 * the software side now.
175 drm_atomic_helper_swap_state(state
, true);
178 * Everything below can be run asynchronously without the need to grab
179 * any modeset locks at all under one condition: It must be guaranteed
180 * that the asynchronous work has either been cancelled (if the driver
181 * supports it, which at least requires that the framebuffers get
182 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
183 * before the new state gets committed on the software side with
184 * drm_atomic_helper_swap_state().
186 * This scheme allows new atomic state updates to be prepared and
187 * checked in parallel to the asynchronous completion of the previous
188 * update. Which is important since compositors need to figure out the
189 * composition of the next frame right after having submitted the
193 drm_atomic_state_get(state
);
195 vc4_queue_seqno_cb(dev
, &c
->cb
, wait_seqno
,
196 vc4_atomic_complete_commit_seqno_cb
);
198 vc4_wait_for_seqno(dev
, wait_seqno
, ~0ull, false);
199 vc4_atomic_complete_commit(c
);
/* Mode-config hooks: stock atomic-helper check and CMA fb creation, plus
 * the vc4-specific seqno-aware commit and fbdev hotplug forwarding.
 */
static const struct drm_mode_config_funcs vc4_mode_funcs = {
	.output_poll_changed = vc4_output_poll_changed,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = vc4_atomic_commit,
	.fb_create = drm_fb_cma_create,
};
212 int vc4_kms_load(struct drm_device
*dev
)
214 struct vc4_dev
*vc4
= to_vc4_dev(dev
);
217 sema_init(&vc4
->async_modeset
, 1);
219 ret
= drm_vblank_init(dev
, dev
->mode_config
.num_crtc
);
221 dev_err(dev
->dev
, "failed to initialize vblank\n");
225 dev
->mode_config
.max_width
= 2048;
226 dev
->mode_config
.max_height
= 2048;
227 dev
->mode_config
.funcs
= &vc4_mode_funcs
;
228 dev
->mode_config
.preferred_depth
= 24;
229 dev
->mode_config
.async_page_flip
= true;
231 drm_mode_config_reset(dev
);
233 vc4
->fbdev
= drm_fbdev_cma_init(dev
, 32,
234 dev
->mode_config
.num_crtc
,
235 dev
->mode_config
.num_connector
);
236 if (IS_ERR(vc4
->fbdev
))
239 drm_kms_helper_poll_init(dev
);