drivers/gpu/drm/vkms/vkms_crtc.c
// SPDX-License-Identifier: GPL-2.0+

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "vkms_drv.h"

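/*
 * CRTC implementation for the virtual KMS driver. Vblank interrupts are
 * simulated with an hrtimer that fires once per frame; each tick also hands
 * the current composer state to an ordered workqueue so that
 * vkms_composer_worker() can compute CRCs for the frame.
 */
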
static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
{
	struct vkms_output *output = container_of(timer, struct vkms_output,
						  vblank_hrtimer);
	struct drm_crtc *crtc = &output->crtc;
	struct vkms_crtc_state *state;
	u64 ret_overrun;
	bool ret;

	ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer,
					  output->period_ns);
	WARN_ON(ret_overrun != 1);

	spin_lock(&output->lock);
	ret = drm_crtc_handle_vblank(crtc);
	if (!ret)
		DRM_ERROR("vkms failure on handling vblank");

	state = output->composer_state;
	spin_unlock(&output->lock);

	if (state && output->composer_enabled) {
		u64 frame = drm_crtc_accurate_vblank_count(crtc);

		/* update frame_start only if a queued vkms_composer_worker()
		 * has read the data
		 */
		spin_lock(&output->composer_lock);
		if (!state->crc_pending)
			state->frame_start = frame;
		else
			DRM_DEBUG_DRIVER("crc worker falling behind, frame_start: %llu, frame_end: %llu\n",
					 state->frame_start, frame);
		state->frame_end = frame;
		state->crc_pending = true;
		spin_unlock(&output->composer_lock);

		ret = queue_work(output->composer_workq, &state->composer_work);
		if (!ret)
			DRM_DEBUG_DRIVER("Composer worker already queued\n");
	}

	return HRTIMER_RESTART;
}

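/*
 * Start vblank simulation: recompute the timestamping constants for the
 * current mode, then arm the hrtimer with a period equal to the mode's
 * frame duration.
 */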
static int vkms_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = drm_crtc_index(crtc);
	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
	struct vkms_output *out = drm_crtc_to_vkms_output(crtc);

	drm_calc_timestamping_constants(crtc, &crtc->mode);

	hrtimer_init(&out->vblank_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	out->vblank_hrtimer.function = &vkms_vblank_simulate;
	out->period_ns = ktime_set(0, vblank->framedur_ns);
	hrtimer_start(&out->vblank_hrtimer, out->period_ns, HRTIMER_MODE_REL);

	return 0;
}

static void vkms_disable_vblank(struct drm_crtc *crtc)
{
	struct vkms_output *out = drm_crtc_to_vkms_output(crtc);

	hrtimer_cancel(&out->vblank_hrtimer);
}

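/*
 * Derive the vblank timestamp from the simulation hrtimer. The next expiry
 * is read with READ_ONCE() since the timer callback may be rolling it
 * forward concurrently on another CPU.
 */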
bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
			       int *max_error, ktime_t *vblank_time,
			       bool in_vblank_irq)
{
	struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev);
	struct vkms_output *output = &vkmsdev->output;
	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];

	*vblank_time = READ_ONCE(output->vblank_hrtimer.node.expires);

	if (WARN_ON(*vblank_time == vblank->time))
		return true;

	/*
	 * To prevent races we roll the hrtimer forward before we do any
	 * interrupt processing - this is how real hw works (the interrupt is
	 * only generated after all the vblank registers are updated) and what
	 * the vblank core expects. Therefore we need to always correct the
	 * timestamp by one frame.
	 */
	*vblank_time -= output->period_ns;

	return true;
}

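/*
 * Atomic state handling: duplicating copies the base CRTC state and gives
 * the copy its own composer work item; active_planes stays NULL until
 * vkms_crtc_atomic_check() fills it in, and destroy frees both the plane
 * array and the state itself.
 */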
static struct drm_crtc_state *
vkms_atomic_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct vkms_crtc_state *vkms_state;

	if (WARN_ON(!crtc->state))
		return NULL;

	vkms_state = kzalloc(sizeof(*vkms_state), GFP_KERNEL);
	if (!vkms_state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &vkms_state->base);

	INIT_WORK(&vkms_state->composer_work, vkms_composer_worker);

	return &vkms_state->base;
}

static void vkms_atomic_crtc_destroy_state(struct drm_crtc *crtc,
					   struct drm_crtc_state *state)
{
	struct vkms_crtc_state *vkms_state = to_vkms_crtc_state(state);

	__drm_atomic_helper_crtc_destroy_state(state);

	WARN_ON(work_pending(&vkms_state->composer_work));
	kfree(vkms_state->active_planes);
	kfree(vkms_state);
}

static void vkms_atomic_crtc_reset(struct drm_crtc *crtc)
{
	struct vkms_crtc_state *vkms_state =
		kzalloc(sizeof(*vkms_state), GFP_KERNEL);

	if (crtc->state)
		vkms_atomic_crtc_destroy_state(crtc, crtc->state);

	__drm_atomic_helper_crtc_reset(crtc, &vkms_state->base);
	if (vkms_state)
		INIT_WORK(&vkms_state->composer_work, vkms_composer_worker);
}

static const struct drm_crtc_funcs vkms_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = drm_crtc_cleanup,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = vkms_atomic_crtc_reset,
	.atomic_duplicate_state = vkms_atomic_crtc_duplicate_state,
	.atomic_destroy_state = vkms_atomic_crtc_destroy_state,
	.enable_vblank = vkms_enable_vblank,
	.disable_vblank = vkms_disable_vblank,
	.get_crc_sources = vkms_get_crc_sources,
	.set_crc_source = vkms_set_crc_source,
	.verify_crc_source = vkms_verify_crc_source,
};

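/*
 * Two passes over the planes attached to this CRTC: the first counts the
 * visible ones so that active_planes can be sized exactly, the second
 * records pointers to their state for use by the composer.
 */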
static int vkms_crtc_atomic_check(struct drm_crtc *crtc,
				  struct drm_crtc_state *state)
{
	struct vkms_crtc_state *vkms_state = to_vkms_crtc_state(state);
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	int i = 0, ret;

	if (vkms_state->active_planes)
		return 0;

	ret = drm_atomic_add_affected_planes(state->state, crtc);
	if (ret < 0)
		return ret;

	drm_for_each_plane_mask(plane, crtc->dev, state->plane_mask) {
		plane_state = drm_atomic_get_existing_plane_state(state->state,
								  plane);
		WARN_ON(!plane_state);

		if (!plane_state->visible)
			continue;

		i++;
	}

	vkms_state->active_planes = kcalloc(i, sizeof(plane), GFP_KERNEL);
	if (!vkms_state->active_planes)
		return -ENOMEM;
	vkms_state->num_active_planes = i;

	i = 0;
	drm_for_each_plane_mask(plane, crtc->dev, state->plane_mask) {
		plane_state = drm_atomic_get_existing_plane_state(state->state,
								  plane);

		if (!plane_state->visible)
			continue;

		vkms_state->active_planes[i++] =
			to_vkms_plane_state(plane_state);
	}

	return 0;
}

static void vkms_crtc_atomic_enable(struct drm_crtc *crtc,
				    struct drm_crtc_state *old_state)
{
	drm_crtc_vblank_on(crtc);
}

static void vkms_crtc_atomic_disable(struct drm_crtc *crtc,
				     struct drm_crtc_state *old_state)
{
	drm_crtc_vblank_off(crtc);
}

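/*
 * vkms_crtc_atomic_begin() takes vkms_output->lock and
 * vkms_crtc_atomic_flush() drops it again once composer_state has been
 * updated, so the vblank timer never observes a commit in progress.
 */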
static void vkms_crtc_atomic_begin(struct drm_crtc *crtc,
				   struct drm_crtc_state *old_crtc_state)
{
	struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc);

	/* This lock is held across the atomic commit to block vblank timer
	 * from scheduling vkms_composer_worker until the composer is updated
	 */
	spin_lock_irq(&vkms_output->lock);
}

static void vkms_crtc_atomic_flush(struct drm_crtc *crtc,
				   struct drm_crtc_state *old_crtc_state)
{
	struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc);

	if (crtc->state->event) {
		spin_lock(&crtc->dev->event_lock);

		if (drm_crtc_vblank_get(crtc) != 0)
			drm_crtc_send_vblank_event(crtc, crtc->state->event);
		else
			drm_crtc_arm_vblank_event(crtc, crtc->state->event);

		spin_unlock(&crtc->dev->event_lock);

		crtc->state->event = NULL;
	}

	vkms_output->composer_state = to_vkms_crtc_state(crtc->state);

	spin_unlock_irq(&vkms_output->lock);
}

static const struct drm_crtc_helper_funcs vkms_crtc_helper_funcs = {
	.atomic_check = vkms_crtc_atomic_check,
	.atomic_begin = vkms_crtc_atomic_begin,
	.atomic_flush = vkms_crtc_atomic_flush,
	.atomic_enable = vkms_crtc_atomic_enable,
	.atomic_disable = vkms_crtc_atomic_disable,
};

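/*
 * Register the CRTC with its primary and cursor planes, initialize the locks
 * shared with the vblank timer, and create the ordered workqueue that
 * serializes vkms_composer_worker() invocations.
 */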
int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
		   struct drm_plane *primary, struct drm_plane *cursor)
{
	struct vkms_output *vkms_out = drm_crtc_to_vkms_output(crtc);
	int ret;

	ret = drm_crtc_init_with_planes(dev, crtc, primary, cursor,
					&vkms_crtc_funcs, NULL);
	if (ret) {
		DRM_ERROR("Failed to init CRTC\n");
		return ret;
	}

	drm_crtc_helper_add(crtc, &vkms_crtc_helper_funcs);

	spin_lock_init(&vkms_out->lock);
	spin_lock_init(&vkms_out->composer_lock);

	vkms_out->composer_workq = alloc_ordered_workqueue("vkms_composer", 0);
	if (!vkms_out->composer_workq)
		return -ENOMEM;

	return ret;
}