// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Broadcom
 */

/**
 * DOC: VC4 KMS
 *
 * This is the general code for implementing KMS mode setting that
 * doesn't clearly associate with any of the other objects (plane,
 * crtc, HDMI encoder).
 */

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "vc4_drv.h"
#include "vc4_regs.h"

struct vc4_ctm_state {
	struct drm_private_state base;
	struct drm_color_ctm *ctm;
	int fifo;
};

static struct vc4_ctm_state *to_vc4_ctm_state(struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_ctm_state, base);
}

struct vc4_load_tracker_state {
	struct drm_private_state base;
	u64 hvs_load;
	u64 membus_load;
};

static struct vc4_load_tracker_state *
to_vc4_load_tracker_state(struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_load_tracker_state, base);
}

static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state,
					       struct drm_private_obj *manager)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = dev->dev_private;
	struct drm_private_state *priv_state;
	int ret;

	ret = drm_modeset_lock(&vc4->ctm_state_lock, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	priv_state = drm_atomic_get_private_obj_state(state, manager);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_vc4_ctm_state(priv_state);
}

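/* Note that the CTM state is a single private object shared by all CRTCs,
 * which is presumably why it gets a dedicated modeset lock: taking it
 * through the atomic acquire context above keeps the usual DRM
 * deadlock-detection and backoff behaviour for contending commits.
 */
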
static struct drm_private_state *
vc4_ctm_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_ctm_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void vc4_ctm_destroy_state(struct drm_private_obj *obj,
				  struct drm_private_state *state)
{
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(state);

	kfree(ctm_state);
}

static const struct drm_private_state_funcs vc4_ctm_state_funcs = {
	.atomic_duplicate_state = vc4_ctm_duplicate_state,
	.atomic_destroy_state = vc4_ctm_destroy_state,
};

/* Converts a DRM S31.32 value to the HW S0.9 format. */
static u16 vc4_ctm_s31_32_to_s0_9(u64 in)
{
	u16 r;

	/* Sign bit. */
	r = in & BIT_ULL(63) ? BIT(9) : 0;

	if ((in & GENMASK_ULL(62, 32)) > 0) {
		/* We have zero integer bits so we can only saturate here. */
		r |= GENMASK(8, 0);
	} else {
		/* Otherwise take the 9 most important fractional bits. */
		r |= (in >> 23) & GENMASK(8, 0);
	}

	return r;
}

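/* Worked example of the conversion above: 0.5 in S31.32 is
 * 0x0000000080000000; the sign and integer bits are clear, so the result is
 * (in >> 23) & GENMASK(8, 0) = 0x100, i.e. 256/512 = 0.5 in S0.9. A value
 * like -2.0 (0x8000000200000000) has integer bits set, so it saturates to
 * 0x3ff, i.e. -511/512, the most negative scalar the HW can represent.
 */
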
static void
vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
{
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(vc4->ctm_manager.state);
	struct drm_color_ctm *ctm = ctm_state->ctm;

	if (ctm_state->fifo) {
		HVS_WRITE(SCALER_OLEDCOEF2,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[0]),
					SCALER_OLEDCOEF2_R_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[3]),
					SCALER_OLEDCOEF2_R_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[6]),
					SCALER_OLEDCOEF2_R_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF1,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[1]),
					SCALER_OLEDCOEF1_G_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[4]),
					SCALER_OLEDCOEF1_G_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[7]),
					SCALER_OLEDCOEF1_G_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF0,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[2]),
					SCALER_OLEDCOEF0_B_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[5]),
					SCALER_OLEDCOEF0_B_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[8]),
					SCALER_OLEDCOEF0_B_TO_B));
	}

	HVS_WRITE(SCALER_OLEDOFFS,
		  VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO));
}

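/* The drm_color_ctm matrix is a row-major 3x3 (out_R is computed from
 * matrix[0..2], out_G from matrix[3..5], out_B from matrix[6..8]), so each
 * OLEDCOEF register above gathers one *input* channel's column: e.g.
 * matrix[0], matrix[3] and matrix[6] are the R input's contributions to the
 * R, G and B outputs.
 */
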
static void
vc4_atomic_complete_commit(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_crtc *vc4_crtc;
	int i;

	for (i = 0; i < dev->mode_config.num_crtc; i++) {
		if (!state->crtcs[i].ptr || !state->crtcs[i].commit)
			continue;

		vc4_crtc = to_vc4_crtc(state->crtcs[i].ptr);
		vc4_hvs_mask_underrun(dev, vc4_crtc->channel);
	}

	drm_atomic_helper_wait_for_fences(dev, state, false);

	drm_atomic_helper_wait_for_dependencies(state);

	drm_atomic_helper_commit_modeset_disables(dev, state);

	vc4_ctm_commit(vc4, state);

	drm_atomic_helper_commit_planes(dev, state, 0);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	drm_atomic_helper_fake_vblank(state);

	drm_atomic_helper_commit_hw_done(state);

	drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	drm_atomic_helper_commit_cleanup_done(state);

	drm_atomic_state_put(state);

	up(&vc4->async_modeset);
}

static void commit_work(struct work_struct *work)
{
	struct drm_atomic_state *state = container_of(work,
						      struct drm_atomic_state,
						      commit_work);
	vc4_atomic_complete_commit(state);
}

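/* For nonblocking commits, vc4_atomic_commit() below queues this worker on
 * system_unbound_wq; blocking commits call vc4_atomic_complete_commit()
 * directly instead.
 */
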
/**
 * vc4_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the driver state object
 * @nonblock: nonblocking commit
 *
 * This function commits a state object that has been pre-validated with
 * drm_atomic_helper_check(). It can still fail when e.g. the framebuffer
 * reservation fails. For now this doesn't implement asynchronous commits.
 *
 * RETURNS
 * Zero for success or -errno.
 */
static int vc4_atomic_commit(struct drm_device *dev,
			     struct drm_atomic_state *state,
			     bool nonblock)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret;

	if (state->async_update) {
		ret = down_interruptible(&vc4->async_modeset);
		if (ret)
			return ret;

		ret = drm_atomic_helper_prepare_planes(dev, state);
		if (ret) {
			up(&vc4->async_modeset);
			return ret;
		}

		drm_atomic_helper_async_commit(dev, state);

		drm_atomic_helper_cleanup_planes(dev, state);

		up(&vc4->async_modeset);

		return 0;
	}

	/* We know for sure we don't want an async update here. Set
	 * state->legacy_cursor_update to false to prevent
	 * drm_atomic_helper_setup_commit() from auto-completing
	 * commit->flip_done.
	 */
	state->legacy_cursor_update = false;
	ret = drm_atomic_helper_setup_commit(state, nonblock);
	if (ret)
		return ret;

	INIT_WORK(&state->commit_work, commit_work);

	ret = down_interruptible(&vc4->async_modeset);
	if (ret)
		return ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret) {
		up(&vc4->async_modeset);
		return ret;
	}

	if (!nonblock) {
		ret = drm_atomic_helper_wait_for_fences(dev, state, true);
		if (ret) {
			drm_atomic_helper_cleanup_planes(dev, state);
			up(&vc4->async_modeset);
			return ret;
		}
	}

	/*
	 * This is the point of no return - everything below never fails except
	 * when the hw goes bonghits. Which means we can commit the new state on
	 * the software side now.
	 */

	BUG_ON(drm_atomic_helper_swap_state(state, false) < 0);

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update. Which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 */

	drm_atomic_state_get(state);
	if (nonblock)
		queue_work(system_unbound_wq, &state->commit_work);
	else
		vc4_atomic_complete_commit(state);

	return 0;
}

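/* The async_modeset semaphore taken above is what serializes commits: it is
 * only released by the up() at the end of vc4_atomic_complete_commit(), so a
 * second commit (blocking or not) sleeps in down_interruptible() until the
 * previous one has fully completed.
 */
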
static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
					     struct drm_file *file_priv,
					     const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_mode_fb_cmd2 mode_cmd_local;

	/* If the user didn't specify a modifier, use the
	 * vc4_set_tiling_ioctl() state for the BO.
	 */
	if (!(mode_cmd->flags & DRM_MODE_FB_MODIFIERS)) {
		struct drm_gem_object *gem_obj;
		struct vc4_bo *bo;

		gem_obj = drm_gem_object_lookup(file_priv,
						mode_cmd->handles[0]);
		if (!gem_obj) {
			DRM_DEBUG("Failed to look up GEM BO %d\n",
				  mode_cmd->handles[0]);
			return ERR_PTR(-ENOENT);
		}
		bo = to_vc4_bo(gem_obj);

		mode_cmd_local = *mode_cmd;

		if (bo->t_format) {
			mode_cmd_local.modifier[0] =
				DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
		} else {
			mode_cmd_local.modifier[0] = DRM_FORMAT_MOD_NONE;
		}

		drm_gem_object_put_unlocked(gem_obj);

		mode_cmd = &mode_cmd_local;
	}

	return drm_gem_fb_create(dev, file_priv, mode_cmd);
}

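/* As an example of the fallback above: legacy userspace that never sets
 * DRM_MODE_FB_MODIFIERS but marked the BO as tiled through
 * vc4_set_tiling_ioctl() still ends up with a
 * DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED framebuffer here.
 */
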
/* Our CTM has some peculiar limitations: we can only enable it for one CRTC
 * at a time and the HW only supports S0.9 scalars. To account for the latter,
 * we don't allow userland to set a CTM that we have no hope of approximating.
 */
static int
vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_ctm_state *ctm_state = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_color_ctm *ctm;
	int i;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		/* CTM is being disabled. */
		if (!new_crtc_state->ctm && old_crtc_state->ctm) {
			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
			if (IS_ERR(ctm_state))
				return PTR_ERR(ctm_state);
			ctm_state->fifo = 0;
		}
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (new_crtc_state->ctm == old_crtc_state->ctm)
			continue;

		if (!ctm_state) {
			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
			if (IS_ERR(ctm_state))
				return PTR_ERR(ctm_state);
		}

		/* CTM is being enabled or the matrix changed. */
		if (new_crtc_state->ctm) {
			/* fifo is 1-based since 0 disables CTM. */
			int fifo = to_vc4_crtc(crtc)->channel + 1;

			/* Check userland isn't trying to turn on CTM for more
			 * than one CRTC at a time.
			 */
			if (ctm_state->fifo && ctm_state->fifo != fifo) {
				DRM_DEBUG_DRIVER("Too many CTM configured\n");
				return -EINVAL;
			}

			/* Check we can approximate the specified CTM.
			 * We disallow scalars |c| > 1.0 since the HW has
			 * no integer bits.
			 */
			ctm = new_crtc_state->ctm->data;
			for (i = 0; i < ARRAY_SIZE(ctm->matrix); i++) {
				u64 val = ctm->matrix[i];

				val &= ~BIT_ULL(63);
				if (val > BIT_ULL(32))
					return -EINVAL;
			}

			ctm_state->fifo = fifo;
			ctm_state->ctm = ctm;
		}
	}

	return 0;
}

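/* To make the |c| > 1.0 check above concrete: 1.25 in S31.32 is
 * 0x0000000140000000, which stays above BIT_ULL(32) after the sign bit is
 * cleared and is rejected, while +/-1.0 (magnitude 0x0000000100000000)
 * passes and simply saturates to +/-511/512 in vc4_ctm_s31_32_to_s0_9().
 */
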
static int vc4_load_tracker_atomic_check(struct drm_atomic_state *state)
{
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct vc4_load_tracker_state *load_state;
	struct drm_private_state *priv_state;
	struct drm_plane *plane;
	int i;

	priv_state = drm_atomic_get_private_obj_state(state,
						      &vc4->load_tracker);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	load_state = to_vc4_load_tracker_state(priv_state);
	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
				       new_plane_state, i) {
		struct vc4_plane_state *vc4_plane_state;

		if (old_plane_state->fb && old_plane_state->crtc) {
			vc4_plane_state = to_vc4_plane_state(old_plane_state);
			load_state->membus_load -= vc4_plane_state->membus_load;
			load_state->hvs_load -= vc4_plane_state->hvs_load;
		}

		if (new_plane_state->fb && new_plane_state->crtc) {
			vc4_plane_state = to_vc4_plane_state(new_plane_state);
			load_state->membus_load += vc4_plane_state->membus_load;
			load_state->hvs_load += vc4_plane_state->hvs_load;
		}
	}

	/* Don't check the load when the tracker is disabled. */
	if (!vc4->load_tracker_enabled)
		return 0;

	/* The absolute limit is 2Gbyte/sec, but let's take a margin to let
	 * the system work when other blocks are accessing the memory.
	 */
	if (load_state->membus_load > SZ_1G + SZ_512M)
		return -ENOSPC;

	/* HVS clock is supposed to run @ 250Mhz, let's take a margin and
	 * consider the maximum number of cycles is 240M.
	 */
	if (load_state->hvs_load > 240000000ULL)
		return -ENOSPC;

	return 0;
}

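/* For reference, the margins picked above work out to: SZ_1G + SZ_512M =
 * 1.5 GB/s, i.e. 75% of the 2 GB/s memory bus limit, and 240M cycles is 96%
 * of the 250 MHz HVS clock.
 */
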
static struct drm_private_state *
vc4_load_tracker_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_load_tracker_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void vc4_load_tracker_destroy_state(struct drm_private_obj *obj,
					   struct drm_private_state *state)
{
	struct vc4_load_tracker_state *load_state;

	load_state = to_vc4_load_tracker_state(state);
	kfree(load_state);
}

static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = {
	.atomic_duplicate_state = vc4_load_tracker_duplicate_state,
	.atomic_destroy_state = vc4_load_tracker_destroy_state,
};

static int
vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	int ret;

	ret = vc4_ctm_atomic_check(dev, state);
	if (ret < 0)
		return ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	return vc4_load_tracker_atomic_check(state);
}

static const struct drm_mode_config_funcs vc4_mode_funcs = {
	.atomic_check = vc4_atomic_check,
	.atomic_commit = vc4_atomic_commit,
	.fb_create = vc4_fb_create,
};

int vc4_kms_load(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_ctm_state *ctm_state;
	struct vc4_load_tracker_state *load_state;
	int ret;

	/* Start with the load tracker enabled. Can be disabled through the
	 * debugfs load_tracker file.
	 */
	vc4->load_tracker_enabled = true;

	sema_init(&vc4->async_modeset, 1);

	/* Set support for vblank irq fast disable, before drm_vblank_init() */
	dev->vblank_disable_immediate = true;

	dev->irq_enabled = true;
	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize vblank\n");
		return ret;
	}

	dev->mode_config.max_width = 2048;
	dev->mode_config.max_height = 2048;
	dev->mode_config.funcs = &vc4_mode_funcs;
	dev->mode_config.preferred_depth = 24;
	dev->mode_config.async_page_flip = true;
	dev->mode_config.allow_fb_modifiers = true;

	drm_modeset_lock_init(&vc4->ctm_state_lock);

	ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL);
	if (!ctm_state)
		return -ENOMEM;

	drm_atomic_private_obj_init(dev, &vc4->ctm_manager, &ctm_state->base,
				    &vc4_ctm_state_funcs);

	load_state = kzalloc(sizeof(*load_state), GFP_KERNEL);
	if (!load_state) {
		drm_atomic_private_obj_fini(&vc4->ctm_manager);
		return -ENOMEM;
	}

	drm_atomic_private_obj_init(dev, &vc4->load_tracker, &load_state->base,
				    &vc4_load_tracker_state_funcs);

	drm_mode_config_reset(dev);

	drm_kms_helper_poll_init(dev);

	return 0;
}
);