/*
 * rcar_du_kms.c  --  R-Car Display Unit Mode Setting
 *
 * Copyright (C) 2013-2014 Renesas Electronics Corporation
 *
 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>

#include <linux/of_graph.h>
#include <linux/wait.h>

#include "rcar_du_crtc.h"
#include "rcar_du_drv.h"
#include "rcar_du_encoder.h"
#include "rcar_du_kms.h"
#include "rcar_du_lvdsenc.h"
#include "rcar_du_regs.h"
/* -----------------------------------------------------------------------------
 * Format helpers
 */

static const struct rcar_du_format_info rcar_du_format_infos[] = {
	{
		.fourcc = DRM_FORMAT_RGB565,
		.bpp = 16,
		.planes = 1,
		.pnmr = PnMR_SPIM_TP | PnMR_DDDF_16BPP,
		.edf = PnDDCR4_EDF_NONE,
	}, {
		.fourcc = DRM_FORMAT_ARGB1555,
		.bpp = 16,
		.planes = 1,
		.pnmr = PnMR_SPIM_ALP | PnMR_DDDF_ARGB,
		.edf = PnDDCR4_EDF_NONE,
	}, {
		.fourcc = DRM_FORMAT_XRGB1555,
		.bpp = 16,
		.planes = 1,
		.pnmr = PnMR_SPIM_ALP | PnMR_DDDF_ARGB,
		.edf = PnDDCR4_EDF_NONE,
	}, {
		.fourcc = DRM_FORMAT_XRGB8888,
		.bpp = 32,
		.planes = 1,
		.pnmr = PnMR_SPIM_TP | PnMR_DDDF_16BPP,
		.edf = PnDDCR4_EDF_RGB888,
	}, {
		.fourcc = DRM_FORMAT_ARGB8888,
		.bpp = 32,
		.planes = 1,
		.pnmr = PnMR_SPIM_ALP | PnMR_DDDF_16BPP,
		.edf = PnDDCR4_EDF_ARGB8888,
	}, {
		.fourcc = DRM_FORMAT_UYVY,
		.bpp = 16,
		.planes = 1,
		.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
		.edf = PnDDCR4_EDF_NONE,
	}, {
		.fourcc = DRM_FORMAT_YUYV,
		.bpp = 16,
		.planes = 1,
		.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
		.edf = PnDDCR4_EDF_NONE,
	}, {
		.fourcc = DRM_FORMAT_NV12,
		.bpp = 12,
		.planes = 2,
		.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
		.edf = PnDDCR4_EDF_NONE,
	}, {
		.fourcc = DRM_FORMAT_NV21,
		.bpp = 12,
		.planes = 2,
		.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
		.edf = PnDDCR4_EDF_NONE,
	}, {
		/* In YUV 4:2:2, only NV16 is supported (NV61 isn't) */
		.fourcc = DRM_FORMAT_NV16,
		.bpp = 16,
		.planes = 2,
		.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
		.edf = PnDDCR4_EDF_NONE,
	},
};
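/* Example: rcar_du_format_info(DRM_FORMAT_ARGB8888) returns the single-plane,
 * 32 bpp entry above, while an unlisted fourcc such as DRM_FORMAT_NV61 makes
 * the lookup below return NULL and framebuffer creation fail with -EINVAL.
 */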
const struct rcar_du_format_info *rcar_du_format_info(u32 fourcc)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(rcar_du_format_infos); ++i) {
		if (rcar_du_format_infos[i].fourcc == fourcc)
			return &rcar_du_format_infos[i];
	}

	return NULL;
}
/* -----------------------------------------------------------------------------
 * Frame buffer
 */

int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev,
			struct drm_mode_create_dumb *args)
{
	struct rcar_du_device *rcdu = dev->dev_private;
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	unsigned int align;

	/* The R8A7779 DU requires a 16 pixels pitch alignment as documented,
	 * but the R8A7790 DU seems to require a 128 bytes pitch alignment.
	 */
	if (rcar_du_needs(rcdu, RCAR_DU_QUIRK_ALIGN_128B))
		align = 128;
	else
		align = 16 * args->bpp / 8;

	args->pitch = roundup(min_pitch, align);

	return drm_gem_cma_dumb_create_internal(file, dev, args);
}
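/* Example: for a 1920-pixel-wide, 32 bpp dumb buffer min_pitch is
 * 1920 * 32 / 8 = 7680 bytes. Without the 128-byte quirk the alignment is
 * 16 * 32 / 8 = 64 bytes, with the quirk it is 128 bytes; 7680 already
 * satisfies both, so the pitch is left unchanged in this case.
 */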
static struct drm_framebuffer *
rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
		  struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct rcar_du_device *rcdu = dev->dev_private;
	const struct rcar_du_format_info *format;
	unsigned int max_pitch;
	unsigned int align;
	unsigned int bpp;

	format = rcar_du_format_info(mode_cmd->pixel_format);
	if (format == NULL) {
		dev_dbg(dev->dev, "unsupported pixel format %08x\n",
			mode_cmd->pixel_format);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * The pitch and alignment constraints are expressed in pixels on the
	 * hardware side and in bytes in the DRM API.
	 */
	bpp = format->planes == 2 ? 1 : format->bpp / 8;
	max_pitch = 4096 * bpp;

	if (rcar_du_needs(rcdu, RCAR_DU_QUIRK_ALIGN_128B))
		align = 128;
	else
		align = 16 * bpp;

	if (mode_cmd->pitches[0] & (align - 1) ||
	    mode_cmd->pitches[0] >= max_pitch) {
		dev_dbg(dev->dev, "invalid pitch value %u\n",
			mode_cmd->pitches[0]);
		return ERR_PTR(-EINVAL);
	}

	if (format->planes == 2) {
		if (mode_cmd->pitches[1] != mode_cmd->pitches[0]) {
			dev_dbg(dev->dev,
				"luma and chroma pitches do not match\n");
			return ERR_PTR(-EINVAL);
		}
	}

	return drm_fb_cma_create(dev, file_priv, mode_cmd);
}
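/* Example: the hardware limit of 4096 pixels per line translates to a
 * maximum pitch of 4096 * 4 = 16384 bytes for XRGB8888, but only 4096 bytes
 * for the two-plane NV formats, whose pitch is counted in luma-plane pixels
 * (one byte per pixel).
 */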
static void rcar_du_output_poll_changed(struct drm_device *dev)
{
	struct rcar_du_device *rcdu = dev->dev_private;

	drm_fbdev_cma_hotplug_event(rcdu->fbdev);
}
/* -----------------------------------------------------------------------------
 * Atomic Check and Update
 */

/*
 * Atomic hardware plane allocator
 *
 * The hardware plane allocator is solely based on the atomic plane states
 * without keeping any external state to avoid races between .atomic_check()
 * and .atomic_commit().
 *
 * The core idea is to avoid using a free planes bitmask that would need to be
 * shared between check and commit handlers with a collective knowledge based
 * on the allocated hardware plane(s) for each KMS plane. The allocator then
 * loops over all plane states to compute the free planes bitmask, allocates
 * hardware planes based on that bitmask, and stores the result back in the
 * plane states.
 *
 * For this to work we need to access the current state of planes not touched
 * by the atomic update. To ensure that it won't be modified, we need to lock
 * all planes using drm_atomic_get_plane_state(). This effectively serializes
 * atomic updates from .atomic_check() up to completion (when swapping the
 * states if the check step has succeeded) or rollback (when freeing the
 * states if the check step has failed).
 *
 * Allocation is performed in the .atomic_check() handler and applied
 * automatically when the core swaps the old and new states.
 */
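/* For example, when an update disables the KMS plane that owns hardware
 * plane 2, the first pass in rcar_du_atomic_check() below records that KMS
 * plane in group_freed_planes and resets its hwindex, the second pass then
 * skips it while accumulating used_planes so bit 2 ends up set in
 * group_free_planes, and the third pass can hand hardware plane 2 to a plane
 * flagged for reallocation.
 */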
static bool rcar_du_plane_needs_realloc(struct rcar_du_plane *plane,
					struct rcar_du_plane_state *state)
{
	const struct rcar_du_format_info *cur_format;

	cur_format = to_rcar_du_plane_state(plane->plane.state)->format;

	/* Lowering the number of planes doesn't strictly require reallocation
	 * as the extra hardware plane will be freed when committing, but doing
	 * so could lead to more fragmentation.
	 */
	return !cur_format || cur_format->planes != state->format->planes;
}
static unsigned int rcar_du_plane_hwmask(struct rcar_du_plane_state *state)
{
	unsigned int mask;

	if (state->hwindex == -1)
		return 0;

	mask = 1 << state->hwindex;
	if (state->format->planes == 2)
		mask |= 1 << ((state->hwindex + 1) % 8);

	return mask;
}
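/* The mask wraps within a group of eight hardware planes: a two-plane format
 * assigned to hardware plane 7 also claims plane 0, so the function returns
 * 0x81 in that case.
 */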
static int rcar_du_plane_hwalloc(unsigned int num_planes, unsigned int free)
{
	unsigned int i;

	for (i = 0; i < RCAR_DU_NUM_HW_PLANES; ++i) {
		if (!(free & (1 << i)))
			continue;

		if (num_planes == 1 || free & (1 << ((i + 1) % 8)))
			break;
	}

	return i == RCAR_DU_NUM_HW_PLANES ? -EBUSY : i;
}
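/* Example: with num_planes = 2 and a free mask of 0x18 (hardware planes 3 and
 * 4 available), the allocator returns 3 since both plane 3 and the adjacent
 * plane 4 are free. If no suitable plane or pair of planes exists it returns
 * -EBUSY.
 */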
static int rcar_du_atomic_check(struct drm_device *dev,
				struct drm_atomic_state *state)
{
	struct rcar_du_device *rcdu = dev->dev_private;
	unsigned int group_freed_planes[RCAR_DU_MAX_GROUPS] = { 0, };
	unsigned int group_free_planes[RCAR_DU_MAX_GROUPS] = { 0, };
	bool needs_realloc = false;
	unsigned int groups = 0;
	unsigned int i;
	int ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret < 0)
		return ret;

	/* Check if hardware planes need to be reallocated. */
	for (i = 0; i < dev->mode_config.num_total_plane; ++i) {
		struct rcar_du_plane_state *plane_state;
		struct rcar_du_plane *plane;
		unsigned int index;

		if (!state->planes[i])
			continue;

		plane = to_rcar_plane(state->planes[i]);
		plane_state = to_rcar_du_plane_state(state->plane_states[i]);

		/* If the plane is being disabled we don't need to go through
		 * the full reallocation procedure. Just mark the hardware
		 * plane(s) as freed.
		 */
		if (!plane_state->format) {
			index = plane - plane->group->planes.planes;
			group_freed_planes[plane->group->index] |= 1 << index;
			plane_state->hwindex = -1;
			continue;
		}

		/* If the plane needs to be reallocated mark it as such, and
		 * mark the hardware plane(s) as free.
		 */
		if (rcar_du_plane_needs_realloc(plane, plane_state)) {
			groups |= 1 << plane->group->index;
			needs_realloc = true;

			index = plane - plane->group->planes.planes;
			group_freed_planes[plane->group->index] |= 1 << index;
			plane_state->hwindex = -1;
		}
	}

	if (!needs_realloc)
		return 0;

	/* Grab all plane states for the groups that need reallocation to
	 * ensure locking and avoid racy updates. This serializes the update
	 * operation, but there's not much we can do about it as that's the
	 * hardware design.
	 *
	 * Compute the used planes mask for each group at the same time to
	 * avoid looping over the planes separately later.
	 */
	while (groups) {
		unsigned int index = ffs(groups) - 1;
		struct rcar_du_group *group = &rcdu->groups[index];
		unsigned int used_planes = 0;

		for (i = 0; i < RCAR_DU_NUM_KMS_PLANES; ++i) {
			struct rcar_du_plane *plane = &group->planes.planes[i];
			struct rcar_du_plane_state *plane_state;
			struct drm_plane_state *s;

			s = drm_atomic_get_plane_state(state, &plane->plane);
			if (IS_ERR(s))
				return PTR_ERR(s);

			/* If the plane has been freed in the above loop its
			 * hardware planes must not be added to the used planes
			 * bitmask. However, the current state doesn't reflect
			 * the free state yet, as we've modified the new state
			 * above. Use the local freed planes list to check for
			 * that condition instead.
			 */
			if (group_freed_planes[index] & (1 << i))
				continue;

			plane_state = to_rcar_du_plane_state(plane->plane.state);
			used_planes |= rcar_du_plane_hwmask(plane_state);
		}

		group_free_planes[index] = 0xff & ~used_planes;
		groups &= ~(1 << index);
	}

	/* Reallocate hardware planes for each plane that needs it. */
	for (i = 0; i < dev->mode_config.num_total_plane; ++i) {
		struct rcar_du_plane_state *plane_state;
		struct rcar_du_plane *plane;
		int idx;

		if (!state->planes[i])
			continue;

		plane = to_rcar_plane(state->planes[i]);
		plane_state = to_rcar_du_plane_state(state->plane_states[i]);

		/* Skip planes that are being disabled or don't need to be
		 * reallocated.
		 */
		if (!plane_state->format ||
		    !rcar_du_plane_needs_realloc(plane, plane_state))
			continue;

		idx = rcar_du_plane_hwalloc(plane_state->format->planes,
					group_free_planes[plane->group->index]);
		if (idx < 0) {
			dev_dbg(rcdu->dev, "%s: no available hardware plane\n",
				__func__);
			return idx;
		}

		plane_state->hwindex = idx;

		group_free_planes[plane->group->index] &=
			~rcar_du_plane_hwmask(plane_state);
	}

	return 0;
}
struct rcar_du_commit {
	struct work_struct work;
	struct drm_device *dev;
	struct drm_atomic_state *state;
	u32 crtcs;
};
static void rcar_du_atomic_complete(struct rcar_du_commit *commit)
{
	struct drm_device *dev = commit->dev;
	struct rcar_du_device *rcdu = dev->dev_private;
	struct drm_atomic_state *old_state = commit->state;

	/* Apply the atomic update. */
	drm_atomic_helper_commit_modeset_disables(dev, old_state);
	drm_atomic_helper_commit_modeset_enables(dev, old_state);
	drm_atomic_helper_commit_planes(dev, old_state);

	drm_atomic_helper_wait_for_vblanks(dev, old_state);

	drm_atomic_helper_cleanup_planes(dev, old_state);

	drm_atomic_state_free(old_state);

	/* Complete the commit, wake up any waiter. */
	spin_lock(&rcdu->commit.wait.lock);
	rcdu->commit.pending &= ~commit->crtcs;
	wake_up_all_locked(&rcdu->commit.wait);
	spin_unlock(&rcdu->commit.wait.lock);

	kfree(commit);
}
static void rcar_du_atomic_work(struct work_struct *work)
{
	struct rcar_du_commit *commit =
		container_of(work, struct rcar_du_commit, work);

	rcar_du_atomic_complete(commit);
}
static int rcar_du_atomic_commit(struct drm_device *dev,
				 struct drm_atomic_state *state, bool async)
{
	struct rcar_du_device *rcdu = dev->dev_private;
	struct rcar_du_commit *commit;
	unsigned int i;
	int ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret)
		return ret;

	/* Allocate the commit object. */
	commit = kzalloc(sizeof(*commit), GFP_KERNEL);
	if (commit == NULL)
		return -ENOMEM;

	INIT_WORK(&commit->work, rcar_du_atomic_work);
	commit->dev = dev;
	commit->state = state;

	/* Wait until all affected CRTCs have completed previous commits and
	 * mark them as pending.
	 */
	for (i = 0; i < dev->mode_config.num_crtc; ++i) {
		if (state->crtcs[i])
			commit->crtcs |= 1 << drm_crtc_index(state->crtcs[i]);
	}

	spin_lock(&rcdu->commit.wait.lock);
	ret = wait_event_interruptible_locked(rcdu->commit.wait,
			!(rcdu->commit.pending & commit->crtcs));
	if (ret == 0)
		rcdu->commit.pending |= commit->crtcs;
	spin_unlock(&rcdu->commit.wait.lock);

	if (ret) {
		kfree(commit);
		return ret;
	}

	/* Swap the state, this is the point of no return. */
	drm_atomic_helper_swap_state(dev, state);

	if (async)
		schedule_work(&commit->work);
	else
		rcar_du_atomic_complete(commit);

	return 0;
}
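/* The commit.pending bitmask serializes commits per CRTC: rcar_du_atomic_commit()
 * above waits until none of the affected CRTC bits are pending and then sets
 * them, while rcar_du_atomic_complete() clears them and wakes up waiters once
 * the update has been applied. A commit touching CRTCs 0 and 1 therefore only
 * blocks later commits that also touch CRTC 0 or 1.
 */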
/* -----------------------------------------------------------------------------
 * Initialization
 */

static const struct drm_mode_config_funcs rcar_du_mode_config_funcs = {
	.fb_create = rcar_du_fb_create,
	.output_poll_changed = rcar_du_output_poll_changed,
	.atomic_check = rcar_du_atomic_check,
	.atomic_commit = rcar_du_atomic_commit,
};
static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
				     enum rcar_du_output output,
				     struct of_endpoint *ep)
{
	static const struct {
		const char *compatible;
		enum rcar_du_encoder_type type;
	} encoders[] = {
		{ "adi,adv7123", RCAR_DU_ENCODER_VGA },
		{ "adi,adv7511w", RCAR_DU_ENCODER_HDMI },
		{ "thine,thc63lvdm83d", RCAR_DU_ENCODER_LVDS },
	};

	enum rcar_du_encoder_type enc_type = RCAR_DU_ENCODER_NONE;
	struct device_node *connector = NULL;
	struct device_node *encoder = NULL;
	struct device_node *ep_node = NULL;
	struct device_node *entity_ep_node;
	struct device_node *entity;
	int ret;

	/*
	 * Locate the connected entity and infer its type from the number of
	 * endpoints.
	 */
	entity = of_graph_get_remote_port_parent(ep->local_node);
	if (!entity) {
		dev_dbg(rcdu->dev, "unconnected endpoint %s, skipping\n",
			ep->local_node->full_name);
		return -ENODEV;
	}

	entity_ep_node = of_parse_phandle(ep->local_node, "remote-endpoint", 0);

	for_each_endpoint_of_node(entity, ep_node) {
		if (ep_node == entity_ep_node)
			continue;

		/*
		 * We've found one endpoint other than the input, this must
		 * be an encoder. Locate the connector.
		 */
		encoder = entity;
		connector = of_graph_get_remote_port_parent(ep_node);
		of_node_put(ep_node);

		if (!connector) {
			dev_warn(rcdu->dev,
				 "no connector for encoder %s, skipping\n",
				 encoder->full_name);
			of_node_put(entity_ep_node);
			of_node_put(encoder);
			return -ENODEV;
		}

		break;
	}

	of_node_put(entity_ep_node);

	if (encoder) {
		/*
		 * If an encoder has been found, get its type based on its
		 * compatible string.
		 */
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(encoders); ++i) {
			if (of_device_is_compatible(encoder,
						    encoders[i].compatible)) {
				enc_type = encoders[i].type;
				break;
			}
		}

		if (i == ARRAY_SIZE(encoders)) {
			dev_warn(rcdu->dev,
				 "unknown encoder type for %s, skipping\n",
				 encoder->full_name);
			of_node_put(encoder);
			of_node_put(connector);
			return -EINVAL;
		}
	} else {
		/*
		 * If no encoder has been found the entity must be the
		 * connector.
		 */
		connector = entity;
	}

	ret = rcar_du_encoder_init(rcdu, enc_type, output, encoder, connector);
	of_node_put(encoder);
	of_node_put(connector);

	return ret < 0 ? ret : 1;
}

static int rcar_du_encoders_init(struct rcar_du_device *rcdu)
{
	struct device_node *np = rcdu->dev->of_node;
	struct device_node *ep_node;
	unsigned int num_encoders = 0;

	/*
	 * Iterate over the endpoints and create one encoder for each output
	 * pipeline.
	 */
	for_each_endpoint_of_node(np, ep_node) {
		enum rcar_du_output output;
		struct of_endpoint ep;
		unsigned int i;
		int ret;

		ret = of_graph_parse_endpoint(ep_node, &ep);
		if (ret < 0) {
			of_node_put(ep_node);
			return ret;
		}

		/* Find the output route corresponding to the port number. */
		for (i = 0; i < RCAR_DU_OUTPUT_MAX; ++i) {
			if (rcdu->info->routes[i].possible_crtcs &&
			    rcdu->info->routes[i].port == ep.port) {
				output = i;
				break;
			}
		}

		if (i == RCAR_DU_OUTPUT_MAX) {
			dev_warn(rcdu->dev,
				 "port %u references unexisting output, skipping\n",
				 ep.port);
			continue;
		}

		/* Process the output pipeline. */
		ret = rcar_du_encoders_init_one(rcdu, output, &ep);
		if (ret < 0) {
			if (ret == -EPROBE_DEFER) {
				of_node_put(ep_node);
				return ret;
			}

			dev_info(rcdu->dev,
				 "encoder initialization failed, skipping\n");
			continue;
		}

		num_encoders += ret;
	}

	return num_encoders;
}
int rcar_du_modeset_init(struct rcar_du_device *rcdu)
{
	static const unsigned int mmio_offsets[] = {
		DU0_REG_OFFSET, DU2_REG_OFFSET
	};

	struct drm_device *dev = rcdu->ddev;
	struct drm_encoder *encoder;
	struct drm_fbdev_cma *fbdev;
	unsigned int num_encoders;
	unsigned int num_groups;
	unsigned int i;
	int ret;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = 4095;
	dev->mode_config.max_height = 2047;
	dev->mode_config.funcs = &rcar_du_mode_config_funcs;

	rcdu->num_crtcs = rcdu->info->num_crtcs;

	/* Initialize the groups. */
	num_groups = DIV_ROUND_UP(rcdu->num_crtcs, 2);

	for (i = 0; i < num_groups; ++i) {
		struct rcar_du_group *rgrp = &rcdu->groups[i];

		mutex_init(&rgrp->lock);

		rgrp->dev = rcdu;
		rgrp->mmio_offset = mmio_offsets[i];
		rgrp->index = i;

		ret = rcar_du_planes_init(rgrp);
		if (ret < 0)
			return ret;
	}
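	/* Each group drives up to two CRTCs, hence the DIV_ROUND_UP(num_crtcs, 2)
	 * above and the groups[i / 2] indexing in the CRTC loop below. The
	 * mmio_offsets table only needs the DU0 and DU2 register offsets, one
	 * per group.
	 */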
	/* Create the CRTCs. */
	for (i = 0; i < rcdu->num_crtcs; ++i) {
		struct rcar_du_group *rgrp = &rcdu->groups[i / 2];

		ret = rcar_du_crtc_create(rgrp, i);
		if (ret < 0)
			return ret;
	}

	/* Initialize the encoders. */
	ret = rcar_du_lvdsenc_init(rcdu);
	if (ret < 0)
		return ret;

	ret = rcar_du_encoders_init(rcdu);
	if (ret < 0)
		return ret;

	if (ret == 0) {
		dev_err(rcdu->dev, "error: no encoder could be initialized\n");
		return -EINVAL;
	}

	num_encoders = ret;

	/* Set the possible CRTCs and possible clones. There's always at least
	 * one way for all encoders to clone each other, set all bits in the
	 * possible clones field.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
		const struct rcar_du_output_routing *route =
			&rcdu->info->routes[renc->output];

		encoder->possible_crtcs = route->possible_crtcs;
		encoder->possible_clones = (1 << num_encoders) - 1;
	}
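	/* With two encoders, for instance, possible_clones ends up as 0x3 for
	 * both of them, i.e. every encoder is marked as cloneable with every
	 * other, as described in the comment above.
	 */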
	drm_mode_config_reset(dev);

	drm_kms_helper_poll_init(dev);

	if (dev->mode_config.num_connector) {
		fbdev = drm_fbdev_cma_init(dev, 32, dev->mode_config.num_crtc,
					   dev->mode_config.num_connector);
		if (IS_ERR(fbdev))
			return PTR_ERR(fbdev);

		rcdu->fbdev = fbdev;
	} else {
		dev_info(rcdu->dev,
			 "no connector found, disabling fbdev emulation\n");