// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <drm/drm_atomic_uapi.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_vblank.h>

#include "msm_atomic_trace.h"
#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_kms.h"
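/*
 * Prepare a plane's new framebuffer for display: attach the implicit
 * fence from the GEM object to the plane state, and pin the fb's backing
 * memory into the kms address space.
 */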
int msm_atomic_prepare_fb(struct drm_plane *plane,
			  struct drm_plane_state *new_state)
{
	struct msm_drm_private *priv = plane->dev->dev_private;
	struct msm_kms *kms = priv->kms;

	if (!new_state->fb)
		return 0;

	drm_gem_fb_prepare_fb(plane, new_state);

	return msm_framebuffer_prepare(new_state->fb, kms->aspace);
}

/*
 * Helpers to control vblanks while we flush.. basically just to ensure
 * that vblank accounting is switched on, so we get valid seqn/timestamp
 * on pageflip events (if requested)
 */

static void vblank_get(struct msm_kms *kms, unsigned crtc_mask)
{
	struct drm_crtc *crtc;

	for_each_crtc_mask(kms->dev, crtc, crtc_mask) {
		if (!crtc->state->active)
			continue;
		drm_crtc_vblank_get(crtc);
	}
}

static void vblank_put(struct msm_kms *kms, unsigned crtc_mask)
{
	struct drm_crtc *crtc;

	for_each_crtc_mask(kms->dev, crtc, crtc_mask) {
		if (!crtc->state->active)
			continue;
		drm_crtc_vblank_put(crtc);
	}
}

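/*
 * Per-crtc commit serialization: take the commit_lock of each crtc in
 * the mask, and drop them in reverse order, so that a synchronous commit
 * and a deferred async flush cannot race on the same crtc(s).
 */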
static void lock_crtcs(struct msm_kms *kms, unsigned int crtc_mask)
{
	struct drm_crtc *crtc;

	for_each_crtc_mask(kms->dev, crtc, crtc_mask)
		mutex_lock(&kms->commit_lock[drm_crtc_index(crtc)]);
}

static void unlock_crtcs(struct msm_kms *kms, unsigned int crtc_mask)
{
	struct drm_crtc *crtc;

	for_each_crtc_mask_reverse(kms->dev, crtc, crtc_mask)
		mutex_unlock(&kms->commit_lock[drm_crtc_index(crtc)]);
}

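/*
 * The deferred flush: runs from the per-crtc kthread worker shortly
 * before vblank, and flushes, waits on, and completes whatever update
 * msm_atomic_commit_tail() left pending on this crtc.  Bails out early
 * if a synchronous commit already folded the pending flush into its own.
 */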
static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx)
{
	unsigned crtc_mask = BIT(crtc_idx);

	trace_msm_atomic_async_commit_start(crtc_mask);

	lock_crtcs(kms, crtc_mask);

	if (!(kms->pending_crtc_mask & crtc_mask)) {
		unlock_crtcs(kms, crtc_mask);
		goto out;
	}

	kms->pending_crtc_mask &= ~crtc_mask;

	kms->funcs->enable_commit(kms);

	vblank_get(kms, crtc_mask);

	/*
	 * Flush hardware updates:
	 */
	trace_msm_atomic_flush_commit(crtc_mask);
	kms->funcs->flush_commit(kms, crtc_mask);

	/*
	 * Wait for flush to complete:
	 */
	trace_msm_atomic_wait_flush_start(crtc_mask);
	kms->funcs->wait_flush(kms, crtc_mask);
	trace_msm_atomic_wait_flush_finish(crtc_mask);

	vblank_put(kms, crtc_mask);

	kms->funcs->complete_commit(kms, crtc_mask);
	unlock_crtcs(kms, crtc_mask);
	kms->funcs->disable_commit(kms);

out:
	trace_msm_atomic_async_commit_finish(crtc_mask);
}

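/*
 * The pending timer fires in hrtimer (interrupt) context, so it only
 * queues the real work to the kthread worker.
 */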
static enum hrtimer_restart msm_atomic_pending_timer(struct hrtimer *t)
{
	struct msm_pending_timer *timer = container_of(t,
			struct msm_pending_timer, timer);

	kthread_queue_work(timer->worker, &timer->work);

	return HRTIMER_NORESTART;
}

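/*
 * Worker callback: runs in kthread (process) context, where it is safe
 * to take the commit locks and do the actual deferred flush.
 */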
static void msm_atomic_pending_work(struct kthread_work *work)
{
	struct msm_pending_timer *timer = container_of(work,
			struct msm_pending_timer, work);

	msm_atomic_async_commit(timer->kms, timer->crtc_idx);
}

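/*
 * Set up the per-crtc pending timer: an absolute hrtimer which wakes a
 * dedicated SCHED_FIFO kthread worker to run the deferred flush.
 */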
int msm_atomic_init_pending_timer(struct msm_pending_timer *timer,
		struct msm_kms *kms, int crtc_idx)
{
	timer->kms = kms;
	timer->crtc_idx = crtc_idx;
	hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	timer->timer.function = msm_atomic_pending_timer;

	timer->worker = kthread_create_worker(0, "atomic-worker-%d", crtc_idx);
	if (IS_ERR(timer->worker)) {
		int ret = PTR_ERR(timer->worker);

		timer->worker = NULL;
		return ret;
	}

	sched_set_fifo(timer->worker->task);
	kthread_init_work(&timer->work, msm_atomic_pending_work);

	return 0;
}

void msm_atomic_destroy_pending_timer(struct msm_pending_timer *timer)
{
	if (timer->worker)
		kthread_destroy_worker(timer->worker);
}

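/*
 * An update can take the async (deferred flush) path only if it is a
 * legacy-cursor or async-plane update that touches no connectors,
 * requires no modeset, and is confined to a single crtc.
 */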
static bool can_do_async(struct drm_atomic_state *state,
		struct drm_crtc **async_crtc)
{
	struct drm_connector_state *connector_state;
	struct drm_connector *connector;
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, num_crtcs = 0;

	if (!(state->legacy_cursor_update || state->async_update))
		return false;

	/* any connector change, means slow path: */
	for_each_new_connector_in_state(state, connector, connector_state, i)
		return false;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		if (drm_atomic_crtc_needs_modeset(crtc_state))
			return false;

		if (++num_crtcs > 1)
			return false;

		*async_crtc = crtc;
	}

	return true;
}

/* Get bitmask of crtcs that will need to be flushed.  The bitmask
 * can be used with the for_each_crtc_mask() iterator, to iterate
 * affected crtcs without needing to preserve the atomic state.
 */
static unsigned get_crtc_mask(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	unsigned i, mask = 0;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i)
		mask |= drm_crtc_mask(crtc);

	return mask;
}

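/*
 * The atomic commit-tail: pushes the new state down to the hardware,
 * then either flushes synchronously, or, for simple single-crtc updates
 * where the kms backend can report the vsync time, defers the flush to
 * a timer that fires just before vblank.
 */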
void msm_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	struct drm_crtc *async_crtc = NULL;
	unsigned crtc_mask = get_crtc_mask(state);
	bool async = kms->funcs->vsync_time &&
			can_do_async(state, &async_crtc);

	trace_msm_atomic_commit_tail_start(async, crtc_mask);

	kms->funcs->enable_commit(kms);

	/*
	 * Ensure any previous (potentially async) commit has
	 * completed:
	 */
	lock_crtcs(kms, crtc_mask);
	trace_msm_atomic_wait_flush_start(crtc_mask);
	kms->funcs->wait_flush(kms, crtc_mask);
	trace_msm_atomic_wait_flush_finish(crtc_mask);

	/*
	 * Now that there is no in-progress flush, prepare the
	 * current update:
	 */
	kms->funcs->prepare_commit(kms, state);

	/*
	 * Push atomic updates down to hardware:
	 */
	drm_atomic_helper_commit_modeset_disables(dev, state);
	drm_atomic_helper_commit_planes(dev, state, 0);
	drm_atomic_helper_commit_modeset_enables(dev, state);

	if (async) {
		struct msm_pending_timer *timer =
			&kms->pending_timers[drm_crtc_index(async_crtc)];

		/* async updates are limited to single-crtc updates: */
		WARN_ON(crtc_mask != drm_crtc_mask(async_crtc));

		/*
		 * Start timer if we don't already have an update pending
		 * on this crtc:
		 */
		if (!(kms->pending_crtc_mask & crtc_mask)) {
			ktime_t vsync_time, wakeup_time;

			kms->pending_crtc_mask |= crtc_mask;

			vsync_time = kms->funcs->vsync_time(kms, async_crtc);
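			/*
			 * Wake up ~1ms before the expected vblank,
			 * presumably to give the worker enough lead time
			 * to get scheduled and flush within the frame.
			 */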
			wakeup_time = ktime_sub(vsync_time, ms_to_ktime(1));

			hrtimer_start(&timer->timer, wakeup_time,
					HRTIMER_MODE_ABS);
		}

		kms->funcs->disable_commit(kms);
		unlock_crtcs(kms, crtc_mask);
		/*
		 * At this point, from drm core's perspective, we
		 * are done with the atomic update, so we can just
		 * go ahead and signal that it is done:
		 */
		drm_atomic_helper_commit_hw_done(state);
		drm_atomic_helper_cleanup_planes(dev, state);

		trace_msm_atomic_commit_tail_finish(async, crtc_mask);

		return;
	}

	/*
	 * If there is any async flush pending on updated crtcs, fold
	 * them into the current flush.
	 */
	kms->pending_crtc_mask &= ~crtc_mask;

	vblank_get(kms, crtc_mask);

	/*
	 * Flush hardware updates:
	 */
	trace_msm_atomic_flush_commit(crtc_mask);
	kms->funcs->flush_commit(kms, crtc_mask);
	unlock_crtcs(kms, crtc_mask);
	/*
	 * Wait for flush to complete:
	 */
	trace_msm_atomic_wait_flush_start(crtc_mask);
	kms->funcs->wait_flush(kms, crtc_mask);
	trace_msm_atomic_wait_flush_finish(crtc_mask);

	vblank_put(kms, crtc_mask);

	lock_crtcs(kms, crtc_mask);
	kms->funcs->complete_commit(kms, crtc_mask);
	unlock_crtcs(kms, crtc_mask);
	kms->funcs->disable_commit(kms);

	drm_atomic_helper_commit_hw_done(state);
	drm_atomic_helper_cleanup_planes(dev, state);

	trace_msm_atomic_commit_tail_finish(async, crtc_mask);
}