perf bpf: Move perf_event_output() from stdio.h to bpf.h
[linux/fpc-iii.git] / drivers / gpu / drm / rcar-du / rcar_du_crtc.c
blob17741843cf519be7769047c24db32732551f7d24
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * rcar_du_crtc.c -- R-Car Display Unit CRTCs
5 * Copyright (C) 2013-2015 Renesas Electronics Corporation
7 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
8 */
10 #include <linux/clk.h>
11 #include <linux/mutex.h>
12 #include <linux/sys_soc.h>
14 #include <drm/drmP.h>
15 #include <drm/drm_atomic.h>
16 #include <drm/drm_atomic_helper.h>
17 #include <drm/drm_crtc.h>
18 #include <drm/drm_crtc_helper.h>
19 #include <drm/drm_fb_cma_helper.h>
20 #include <drm/drm_gem_cma_helper.h>
21 #include <drm/drm_plane_helper.h>
23 #include "rcar_du_crtc.h"
24 #include "rcar_du_drv.h"
25 #include "rcar_du_kms.h"
26 #include "rcar_du_plane.h"
27 #include "rcar_du_regs.h"
28 #include "rcar_du_vsp.h"
30 static u32 rcar_du_crtc_read(struct rcar_du_crtc *rcrtc, u32 reg)
32 struct rcar_du_device *rcdu = rcrtc->group->dev;
34 return rcar_du_read(rcdu, rcrtc->mmio_offset + reg);
37 static void rcar_du_crtc_write(struct rcar_du_crtc *rcrtc, u32 reg, u32 data)
39 struct rcar_du_device *rcdu = rcrtc->group->dev;
41 rcar_du_write(rcdu, rcrtc->mmio_offset + reg, data);
44 static void rcar_du_crtc_clr(struct rcar_du_crtc *rcrtc, u32 reg, u32 clr)
46 struct rcar_du_device *rcdu = rcrtc->group->dev;
48 rcar_du_write(rcdu, rcrtc->mmio_offset + reg,
49 rcar_du_read(rcdu, rcrtc->mmio_offset + reg) & ~clr);
52 static void rcar_du_crtc_set(struct rcar_du_crtc *rcrtc, u32 reg, u32 set)
54 struct rcar_du_device *rcdu = rcrtc->group->dev;
56 rcar_du_write(rcdu, rcrtc->mmio_offset + reg,
57 rcar_du_read(rcdu, rcrtc->mmio_offset + reg) | set);
60 void rcar_du_crtc_dsysr_clr_set(struct rcar_du_crtc *rcrtc, u32 clr, u32 set)
62 struct rcar_du_device *rcdu = rcrtc->group->dev;
64 rcrtc->dsysr = (rcrtc->dsysr & ~clr) | set;
65 rcar_du_write(rcdu, rcrtc->mmio_offset + DSYSR, rcrtc->dsysr);
68 /* -----------------------------------------------------------------------------
69 * Hardware Setup
/*
 * struct dpll_info - DPLL divider set computed by rcar_du_dpll_divider().
 * The n/m/fdpll fields hold raw register values; the hardware divides by
 * value + 1 (see the NOTES block in rcar_du_dpll_divider()).
 */
72 struct dpll_info {
73 unsigned int output; /* achieved output frequency in Hz */
74 unsigned int fdpll; /* FDPLL post-divider register value */
75 unsigned int n; /* feedback divider register value */
76 unsigned int m; /* input divider register value */
/*
 * rcar_du_dpll_divider - Find DPLL dividers for a target dot clock
 * @rcrtc: the CRTC (used only for the debug message)
 * @dpll: filled with the best N/M/FDPLL set and the achieved output rate
 * @input: DPLL input (external clock) frequency in Hz
 * @target: requested output frequency in Hz
 *
 * Exhaustive search over the valid M, N and FDPLL ranges, keeping the
 * candidate whose output is closest to @target and stopping early on an
 * exact match. If no candidate satisfies the frequency constraints @dpll
 * is left untouched, so callers should pre-initialize it.
 */
79 static void rcar_du_dpll_divider(struct rcar_du_crtc *rcrtc,
80 struct dpll_info *dpll,
81 unsigned long input,
82 unsigned long target)
84 unsigned long best_diff = (unsigned long)-1;
85 unsigned long diff;
86 unsigned int fdpll;
87 unsigned int m;
88 unsigned int n;
91 * fin fvco fout fclkout
92 * in --> [1/M] --> |PD| -> [LPF] -> [VCO] -> [1/P] -+-> [1/FDPLL] -> out
93 * +-> | | |
94 * | |
95 * +---------------- [1/N] <------------+
97 * fclkout = fvco / P / FDPLL -- (1)
99 * fin/M = fvco/P/N
101 * fvco = fin * P * N / M -- (2)
103 * (1) + (2) indicates
105 * fclkout = fin * N / M / FDPLL
107 * NOTES
108 * N : (n + 1)
109 * M : (m + 1)
110 * FDPLL : (fdpll + 1)
111 * P : 2
112 * 2kHz < fvco < 4096MHz
114 * To minimize the jitter,
115 * N : as large as possible
116 * M : as small as possible
118 for (m = 0; m < 4; m++) {
119 for (n = 119; n > 38; n--) {
121 * This code only runs on 64-bit architectures, the
122 * unsigned long type can thus be used for 64-bit
123 * computation. It will still compile without any
124 * warning on 32-bit architectures.
126 * To optimize calculations, use fout instead of fvco
127 * to verify the VCO frequency constraint.
129 unsigned long fout = input * (n + 1) / (m + 1);
131 if (fout < 1000 || fout > 2048 * 1000 * 1000U)
132 continue;
134 for (fdpll = 1; fdpll < 32; fdpll++) {
135 unsigned long output;
137 output = fout / (fdpll + 1);
138 if (output >= 400 * 1000 * 1000)
139 continue;
141 diff = abs((long)output - (long)target);
142 if (best_diff > diff) {
143 best_diff = diff;
144 dpll->n = n;
145 dpll->m = m;
146 dpll->fdpll = fdpll;
147 dpll->output = output;
150 if (diff == 0)
151 goto done;
156 done:
157 dev_dbg(rcrtc->group->dev->dev,
158 "output:%u, fdpll:%u, n:%u, m:%u, diff:%lu\n",
159 dpll->output, dpll->fdpll, dpll->n, dpll->m,
160 best_diff);
/*
 * struct du_clk_params - Best clock candidate found by rcar_du_escr_divider().
 * Initialize @diff to (unsigned long)-1 before the first call.
 */
163 struct du_clk_params {
164 struct clk *clk; /* candidate clock source */
165 unsigned long rate; /* rounded rate of @clk in Hz */
166 unsigned long diff; /* distance of rate/divider from the target */
167 u32 escr; /* ESCR value including the divider field */
170 static void rcar_du_escr_divider(struct clk *clk, unsigned long target,
171 u32 escr, struct du_clk_params *params)
173 unsigned long rate;
174 unsigned long diff;
175 u32 div;
178 * If the target rate has already been achieved perfectly we can't do
179 * better.
181 if (params->diff == 0)
182 return;
185 * Compute the input clock rate and internal divisor values to obtain
186 * the clock rate closest to the target frequency.
188 rate = clk_round_rate(clk, target);
189 div = clamp(DIV_ROUND_CLOSEST(rate, target), 1UL, 64UL) - 1;
190 diff = abs(rate / (div + 1) - target);
193 * Store the parameters if the resulting frequency is better than any
194 * previously calculated value.
196 if (diff < params->diff) {
197 params->clk = clk;
198 params->rate = rate;
199 params->diff = diff;
200 params->escr = escr | div;
/* SoC match table for the H3 (r8a7795) ES1.x dot clock workaround. */
204 static const struct soc_device_attribute rcar_du_r8a7795_es1[] = {
205 { .soc_id = "r8a7795", .revision = "ES1.*" },
206 { /* sentinel */ }
/*
 * rcar_du_crtc_set_display_timing - Program dot clock and display timings
 * @rcrtc: the CRTC
 *
 * Selects and configures the dot clock source for the CRTC — display PLL,
 * LVDS PLL output, or the internal/external clock with divider, depending
 * on the SoC capabilities — then programs sync polarities and the
 * horizontal/vertical timing registers from the adjusted mode.
 */
209 static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
211 const struct drm_display_mode *mode = &rcrtc->crtc.state->adjusted_mode;
212 struct rcar_du_device *rcdu = rcrtc->group->dev;
213 unsigned long mode_clock = mode->clock * 1000;
214 u32 dsmr;
215 u32 escr;
/* Channels with a display PLL must derive the dot clock from it. */
217 if (rcdu->info->dpll_mask & (1 << rcrtc->index)) {
218 unsigned long target = mode_clock;
219 struct dpll_info dpll = { 0 };
220 unsigned long extclk;
221 u32 dpllcr;
222 u32 div = 0;
225 * DU channels that have a display PLL can't use the internal
226 * system clock, and have no internal clock divider.
229 if (WARN_ON(!rcrtc->extclock))
230 return;
233 * The H3 ES1.x exhibits dot clock duty cycle stability issues.
234 * We can work around them by configuring the DPLL to twice the
235 * desired frequency, coupled with a /2 post-divider. Restrict
236 * the workaround to H3 ES1.x as ES2.0 and all other SoCs have
237 * no post-divider when a display PLL is present (as shown by
238 * the workaround breaking HDMI output on M3-W during testing).
240 if (soc_device_match(rcar_du_r8a7795_es1)) {
241 target *= 2;
242 div = 1;
245 extclk = clk_get_rate(rcrtc->extclock);
246 rcar_du_dpll_divider(rcrtc, &dpll, extclk, target);
248 dpllcr = DPLLCR_CODE | DPLLCR_CLKE
249 | DPLLCR_FDPLL(dpll.fdpll)
250 | DPLLCR_N(dpll.n) | DPLLCR_M(dpll.m)
251 | DPLLCR_STBY;
/* The PLL and dot clock input selection bits are per-channel. */
253 if (rcrtc->index == 1)
254 dpllcr |= DPLLCR_PLCS1
255 | DPLLCR_INCS_DOTCLKIN1;
256 else
257 dpllcr |= DPLLCR_PLCS0
258 | DPLLCR_INCS_DOTCLKIN0;
260 rcar_du_group_write(rcrtc->group, DPLLCR, dpllcr);
262 escr = ESCR_DCLKSEL_DCLKIN | div;
263 } else if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index)) {
265 * Use the LVDS PLL output as the dot clock when outputting to
266 * the LVDS encoder on an SoC that supports this clock routing
267 * option. We use the clock directly in that case, without any
268 * additional divider.
270 escr = ESCR_DCLKSEL_DCLKIN;
271 } else {
/* Pick the best of the functional and external clocks. */
272 struct du_clk_params params = { .diff = (unsigned long)-1 };
274 rcar_du_escr_divider(rcrtc->clock, mode_clock,
275 ESCR_DCLKSEL_CLKS, &params);
276 if (rcrtc->extclock)
277 rcar_du_escr_divider(rcrtc->extclock, mode_clock,
278 ESCR_DCLKSEL_DCLKIN, &params);
280 dev_dbg(rcrtc->group->dev->dev, "mode clock %lu %s rate %lu\n",
281 mode_clock, params.clk == rcrtc->clock ? "cpg" : "ext",
282 params.rate);
284 clk_set_rate(params.clk, params.rate);
285 escr = params.escr;
288 dev_dbg(rcrtc->group->dev->dev, "%s: ESCR 0x%08x\n", __func__, escr);
/* Even channels use the 02 registers, odd channels the 13 ones. */
290 rcar_du_crtc_write(rcrtc, rcrtc->index % 2 ? ESCR13 : ESCR02, escr);
291 rcar_du_crtc_write(rcrtc, rcrtc->index % 2 ? OTAR13 : OTAR02, 0);
293 /* Signal polarities */
294 dsmr = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? DSMR_VSL : 0)
295 | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? DSMR_HSL : 0)
296 | ((mode->flags & DRM_MODE_FLAG_INTERLACE) ? DSMR_ODEV : 0)
297 | DSMR_DIPM_DISP | DSMR_CSPM;
298 rcar_du_crtc_write(rcrtc, DSMR, dsmr);
300 /* Display timings */
301 rcar_du_crtc_write(rcrtc, HDSR, mode->htotal - mode->hsync_start - 19);
302 rcar_du_crtc_write(rcrtc, HDER, mode->htotal - mode->hsync_start +
303 mode->hdisplay - 19);
304 rcar_du_crtc_write(rcrtc, HSWR, mode->hsync_end -
305 mode->hsync_start - 1);
306 rcar_du_crtc_write(rcrtc, HCR, mode->htotal - 1);
308 rcar_du_crtc_write(rcrtc, VDSR, mode->crtc_vtotal -
309 mode->crtc_vsync_end - 2);
310 rcar_du_crtc_write(rcrtc, VDER, mode->crtc_vtotal -
311 mode->crtc_vsync_end +
312 mode->crtc_vdisplay - 2);
313 rcar_du_crtc_write(rcrtc, VSPR, mode->crtc_vtotal -
314 mode->crtc_vsync_end +
315 mode->crtc_vsync_start - 1);
316 rcar_du_crtc_write(rcrtc, VCR, mode->crtc_vtotal - 1);
318 rcar_du_crtc_write(rcrtc, DESR, mode->htotal - mode->hsync_start - 1);
319 rcar_du_crtc_write(rcrtc, DEWR, mode->hdisplay);
322 void rcar_du_crtc_route_output(struct drm_crtc *crtc,
323 enum rcar_du_output output)
325 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
326 struct rcar_du_device *rcdu = rcrtc->group->dev;
329 * Store the route from the CRTC output to the DU output. The DU will be
330 * configured when starting the CRTC.
332 rcrtc->outputs |= BIT(output);
335 * Store RGB routing to DPAD0, the hardware will be configured when
336 * starting the CRTC.
338 if (output == RCAR_DU_OUTPUT_DPAD0)
339 rcdu->dpad0_source = rcrtc->index;
342 static unsigned int plane_zpos(struct rcar_du_plane *plane)
344 return plane->plane.state->normalized_zpos;
347 static const struct rcar_du_format_info *
348 plane_format(struct rcar_du_plane *plane)
350 return to_rcar_plane_state(plane->plane.state)->format;
/*
 * rcar_du_crtc_update_planes - Recompute the hardware plane setup
 * @rcrtc: the CRTC
 *
 * Sorts the planes enabled on this CRTC by normalized z-order, builds the
 * DSPR display priority value and the hardware plane usage mask, updates
 * the plane-to-dot-clock associations (DPTSR) under the group lock, and
 * writes the per-CRTC plane priority register.
 */
353 static void rcar_du_crtc_update_planes(struct rcar_du_crtc *rcrtc)
355 struct rcar_du_plane *planes[RCAR_DU_NUM_HW_PLANES];
356 struct rcar_du_device *rcdu = rcrtc->group->dev;
357 unsigned int num_planes = 0;
358 unsigned int dptsr_planes;
359 unsigned int hwplanes = 0;
360 unsigned int prio = 0;
361 unsigned int i;
362 u32 dspr = 0;
364 for (i = 0; i < rcrtc->group->num_planes; ++i) {
365 struct rcar_du_plane *plane = &rcrtc->group->planes[i];
366 unsigned int j;
/* Skip planes assigned to another CRTC or not visible. */
368 if (plane->plane.state->crtc != &rcrtc->crtc ||
369 !plane->plane.state->visible)
370 continue;
372 /* Insert the plane in the sorted planes array. */
373 for (j = num_planes++; j > 0; --j) {
374 if (plane_zpos(planes[j-1]) <= plane_zpos(plane))
375 break;
376 planes[j] = planes[j-1];
379 planes[j] = plane;
/* Each DSPR priority field is 4 bits wide, one per hw plane used. */
380 prio += plane_format(plane)->planes * 4;
383 for (i = 0; i < num_planes; ++i) {
384 struct rcar_du_plane *plane = planes[i];
385 struct drm_plane_state *state = plane->plane.state;
386 unsigned int index = to_rcar_plane_state(state)->hwindex;
388 prio -= 4;
389 dspr |= (index + 1) << prio;
390 hwplanes |= 1 << index;
/* Two-plane formats also occupy the next hardware plane. */
392 if (plane_format(plane)->planes == 2) {
393 index = (index + 1) % 8;
395 prio -= 4;
396 dspr |= (index + 1) << prio;
397 hwplanes |= 1 << index;
401 /* If VSP+DU integration is enabled the plane assignment is fixed. */
402 if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE)) {
403 if (rcdu->info->gen < 3) {
404 dspr = (rcrtc->index % 2) + 1;
405 hwplanes = 1 << (rcrtc->index % 2);
406 } else {
407 dspr = (rcrtc->index % 2) ? 3 : 1;
408 hwplanes = 1 << ((rcrtc->index % 2) ? 2 : 0);
413 * Update the planes to display timing and dot clock generator
414 * associations.
416 * Updating the DPTSR register requires restarting the CRTC group,
417 * resulting in visible flicker. To mitigate the issue only update the
418 * association if needed by enabled planes. Planes being disabled will
419 * keep their current association.
421 mutex_lock(&rcrtc->group->lock);
423 dptsr_planes = rcrtc->index % 2 ? rcrtc->group->dptsr_planes | hwplanes
424 : rcrtc->group->dptsr_planes & ~hwplanes;
426 if (dptsr_planes != rcrtc->group->dptsr_planes) {
427 rcar_du_group_write(rcrtc->group, DPTSR,
428 (dptsr_planes << 16) | dptsr_planes);
429 rcrtc->group->dptsr_planes = dptsr_planes;
431 if (rcrtc->group->used_crtcs)
432 rcar_du_group_restart(rcrtc->group);
435 /* Restart the group if plane sources have changed. */
436 if (rcrtc->group->need_restart)
437 rcar_du_group_restart(rcrtc->group);
439 mutex_unlock(&rcrtc->group->lock);
441 rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR,
442 dspr);
445 /* -----------------------------------------------------------------------------
446 * Page Flip
449 void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc)
451 struct drm_pending_vblank_event *event;
452 struct drm_device *dev = rcrtc->crtc.dev;
453 unsigned long flags;
455 spin_lock_irqsave(&dev->event_lock, flags);
456 event = rcrtc->event;
457 rcrtc->event = NULL;
458 spin_unlock_irqrestore(&dev->event_lock, flags);
460 if (event == NULL)
461 return;
463 spin_lock_irqsave(&dev->event_lock, flags);
464 drm_crtc_send_vblank_event(&rcrtc->crtc, event);
465 wake_up(&rcrtc->flip_wait);
466 spin_unlock_irqrestore(&dev->event_lock, flags);
468 drm_crtc_vblank_put(&rcrtc->crtc);
471 static bool rcar_du_crtc_page_flip_pending(struct rcar_du_crtc *rcrtc)
473 struct drm_device *dev = rcrtc->crtc.dev;
474 unsigned long flags;
475 bool pending;
477 spin_lock_irqsave(&dev->event_lock, flags);
478 pending = rcrtc->event != NULL;
479 spin_unlock_irqrestore(&dev->event_lock, flags);
481 return pending;
484 static void rcar_du_crtc_wait_page_flip(struct rcar_du_crtc *rcrtc)
486 struct rcar_du_device *rcdu = rcrtc->group->dev;
488 if (wait_event_timeout(rcrtc->flip_wait,
489 !rcar_du_crtc_page_flip_pending(rcrtc),
490 msecs_to_jiffies(50)))
491 return;
493 dev_warn(rcdu->dev, "page flip timeout\n");
495 rcar_du_crtc_finish_page_flip(rcrtc);
498 /* -----------------------------------------------------------------------------
499 * Start/Stop and Suspend/Resume
502 static void rcar_du_crtc_setup(struct rcar_du_crtc *rcrtc)
504 /* Set display off and background to black */
505 rcar_du_crtc_write(rcrtc, DOOR, DOOR_RGB(0, 0, 0));
506 rcar_du_crtc_write(rcrtc, BPOR, BPOR_RGB(0, 0, 0));
508 /* Configure display timings and output routing */
509 rcar_du_crtc_set_display_timing(rcrtc);
510 rcar_du_group_set_routing(rcrtc->group);
512 /* Start with all planes disabled. */
513 rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 0);
515 /* Enable the VSP compositor. */
516 if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
517 rcar_du_vsp_enable(rcrtc);
519 /* Turn vertical blanking interrupt reporting on. */
520 drm_crtc_vblank_on(&rcrtc->crtc);
523 static int rcar_du_crtc_get(struct rcar_du_crtc *rcrtc)
525 int ret;
528 * Guard against double-get, as the function is called from both the
529 * .atomic_enable() and .atomic_begin() handlers.
531 if (rcrtc->initialized)
532 return 0;
534 ret = clk_prepare_enable(rcrtc->clock);
535 if (ret < 0)
536 return ret;
538 ret = clk_prepare_enable(rcrtc->extclock);
539 if (ret < 0)
540 goto error_clock;
542 ret = rcar_du_group_get(rcrtc->group);
543 if (ret < 0)
544 goto error_group;
546 rcar_du_crtc_setup(rcrtc);
547 rcrtc->initialized = true;
549 return 0;
551 error_group:
552 clk_disable_unprepare(rcrtc->extclock);
553 error_clock:
554 clk_disable_unprepare(rcrtc->clock);
555 return ret;
558 static void rcar_du_crtc_put(struct rcar_du_crtc *rcrtc)
560 rcar_du_group_put(rcrtc->group);
562 clk_disable_unprepare(rcrtc->extclock);
563 clk_disable_unprepare(rcrtc->clock);
565 rcrtc->initialized = false;
568 static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
570 bool interlaced;
573 * Select master sync mode. This enables display operation in master
574 * sync mode (with the HSYNC and VSYNC signals configured as outputs and
575 * actively driven).
577 interlaced = rcrtc->crtc.mode.flags & DRM_MODE_FLAG_INTERLACE;
578 rcar_du_crtc_dsysr_clr_set(rcrtc, DSYSR_TVM_MASK | DSYSR_SCM_MASK,
579 (interlaced ? DSYSR_SCM_INT_VIDEO : 0) |
580 DSYSR_TVM_MASTER);
582 rcar_du_group_start_stop(rcrtc->group, true);
585 static void rcar_du_crtc_disable_planes(struct rcar_du_crtc *rcrtc)
587 struct rcar_du_device *rcdu = rcrtc->group->dev;
588 struct drm_crtc *crtc = &rcrtc->crtc;
589 u32 status;
591 /* Make sure vblank interrupts are enabled. */
592 drm_crtc_vblank_get(crtc);
595 * Disable planes and calculate how many vertical blanking interrupts we
596 * have to wait for. If a vertical blanking interrupt has been triggered
597 * but not processed yet, we don't know whether it occurred before or
598 * after the planes got disabled. We thus have to wait for two vblank
599 * interrupts in that case.
601 spin_lock_irq(&rcrtc->vblank_lock);
602 rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 0);
603 status = rcar_du_crtc_read(rcrtc, DSSR);
604 rcrtc->vblank_count = status & DSSR_VBK ? 2 : 1;
605 spin_unlock_irq(&rcrtc->vblank_lock);
607 if (!wait_event_timeout(rcrtc->vblank_wait, rcrtc->vblank_count == 0,
608 msecs_to_jiffies(100)))
609 dev_warn(rcdu->dev, "vertical blanking timeout\n");
611 drm_crtc_vblank_put(crtc);
614 static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
616 struct drm_crtc *crtc = &rcrtc->crtc;
619 * Disable all planes and wait for the change to take effect. This is
620 * required as the plane enable registers are updated on vblank, and no
621 * vblank will occur once the CRTC is stopped. Disabling planes when
622 * starting the CRTC thus wouldn't be enough as it would start scanning
623 * out immediately from old frame buffers until the next vblank.
625 * This increases the CRTC stop delay, especially when multiple CRTCs
626 * are stopped in one operation as we now wait for one vblank per CRTC.
627 * Whether this can be improved needs to be researched.
629 rcar_du_crtc_disable_planes(rcrtc);
632 * Disable vertical blanking interrupt reporting. We first need to wait
633 * for page flip completion before stopping the CRTC as userspace
634 * expects page flips to eventually complete.
636 rcar_du_crtc_wait_page_flip(rcrtc);
637 drm_crtc_vblank_off(crtc);
639 /* Disable the VSP compositor. */
640 if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
641 rcar_du_vsp_disable(rcrtc);
644 * Select switch sync mode. This stops display operation and configures
645 * the HSYNC and VSYNC signals as inputs.
647 * TODO: Find another way to stop the display for DUs that don't support
648 * TVM sync.
650 if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_TVM_SYNC))
651 rcar_du_crtc_dsysr_clr_set(rcrtc, DSYSR_TVM_MASK,
652 DSYSR_TVM_SWITCH);
654 rcar_du_group_start_stop(rcrtc->group, false);
657 /* -----------------------------------------------------------------------------
658 * CRTC Functions
/*
 * rcar_du_crtc_atomic_enable - .atomic_enable helper operation
 * @crtc: the CRTC being enabled
 * @old_state: the previous CRTC state
 *
 * Acquires the CRTC (clocks and group) and starts scanout. The get result
 * is now checked: starting the CRTC after a failed acquisition would
 * access the hardware with its clocks stopped.
 */
static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc,
				       struct drm_crtc_state *old_state)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);

	if (rcar_du_crtc_get(rcrtc) == 0)
		rcar_du_crtc_start(rcrtc);
}
670 static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc,
671 struct drm_crtc_state *old_state)
673 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
675 rcar_du_crtc_stop(rcrtc);
676 rcar_du_crtc_put(rcrtc);
678 spin_lock_irq(&crtc->dev->event_lock);
679 if (crtc->state->event) {
680 drm_crtc_send_vblank_event(crtc, crtc->state->event);
681 crtc->state->event = NULL;
683 spin_unlock_irq(&crtc->dev->event_lock);
685 rcrtc->outputs = 0;
/*
 * rcar_du_crtc_atomic_begin - .atomic_begin helper operation
 * @crtc: the CRTC
 * @old_crtc_state: the previous CRTC state
 *
 * NOTE(review): the rcar_du_crtc_get() return value is not checked here;
 * on failure plane configuration would proceed on unpowered hardware.
 */
688 static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc,
689 struct drm_crtc_state *old_crtc_state)
691 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
693 WARN_ON(!crtc->state->enable);
696 * If a mode set is in progress we can be called with the CRTC disabled.
697 * We thus need to first get and setup the CRTC in order to configure
698 * planes. We must *not* put the CRTC in .atomic_flush(), as it must be
699 * kept awake until the .atomic_enable() call that will follow. The get
700 * operation in .atomic_enable() will in that case be a no-op, and the
701 * CRTC will be put later in .atomic_disable().
703 * If a mode set is not in progress the CRTC is enabled, and the
704 * following get call will be a no-op. There is thus no need to balance
705 * it in .atomic_flush() either.
707 rcar_du_crtc_get(rcrtc);
709 if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
710 rcar_du_vsp_atomic_begin(rcrtc);
713 static void rcar_du_crtc_atomic_flush(struct drm_crtc *crtc,
714 struct drm_crtc_state *old_crtc_state)
716 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
717 struct drm_device *dev = rcrtc->crtc.dev;
718 unsigned long flags;
720 rcar_du_crtc_update_planes(rcrtc);
722 if (crtc->state->event) {
723 WARN_ON(drm_crtc_vblank_get(crtc) != 0);
725 spin_lock_irqsave(&dev->event_lock, flags);
726 rcrtc->event = crtc->state->event;
727 crtc->state->event = NULL;
728 spin_unlock_irqrestore(&dev->event_lock, flags);
731 if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
732 rcar_du_vsp_atomic_flush(rcrtc);
735 enum drm_mode_status rcar_du_crtc_mode_valid(struct drm_crtc *crtc,
736 const struct drm_display_mode *mode)
738 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
739 struct rcar_du_device *rcdu = rcrtc->group->dev;
740 bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
742 if (interlaced && !rcar_du_has(rcdu, RCAR_DU_FEATURE_INTERLACED))
743 return MODE_NO_INTERLACE;
745 return MODE_OK;
/* CRTC helper operations hooked into the atomic commit machinery. */
748 static const struct drm_crtc_helper_funcs crtc_helper_funcs = {
749 .atomic_begin = rcar_du_crtc_atomic_begin,
750 .atomic_flush = rcar_du_crtc_atomic_flush,
751 .atomic_enable = rcar_du_crtc_atomic_enable,
752 .atomic_disable = rcar_du_crtc_atomic_disable,
753 .mode_valid = rcar_du_crtc_mode_valid,
756 static void rcar_du_crtc_crc_init(struct rcar_du_crtc *rcrtc)
758 struct rcar_du_device *rcdu = rcrtc->group->dev;
759 const char **sources;
760 unsigned int count;
761 int i = -1;
763 /* CRC available only on Gen3 HW. */
764 if (rcdu->info->gen < 3)
765 return;
767 /* Reserve 1 for "auto" source. */
768 count = rcrtc->vsp->num_planes + 1;
770 sources = kmalloc_array(count, sizeof(*sources), GFP_KERNEL);
771 if (!sources)
772 return;
774 sources[0] = kstrdup("auto", GFP_KERNEL);
775 if (!sources[0])
776 goto error;
778 for (i = 0; i < rcrtc->vsp->num_planes; ++i) {
779 struct drm_plane *plane = &rcrtc->vsp->planes[i].plane;
780 char name[16];
782 sprintf(name, "plane%u", plane->base.id);
783 sources[i + 1] = kstrdup(name, GFP_KERNEL);
784 if (!sources[i + 1])
785 goto error;
788 rcrtc->sources = sources;
789 rcrtc->sources_count = count;
790 return;
792 error:
793 while (i >= 0) {
794 kfree(sources[i]);
795 i--;
797 kfree(sources);
800 static void rcar_du_crtc_crc_cleanup(struct rcar_du_crtc *rcrtc)
802 unsigned int i;
804 if (!rcrtc->sources)
805 return;
807 for (i = 0; i < rcrtc->sources_count; i++)
808 kfree(rcrtc->sources[i]);
809 kfree(rcrtc->sources);
811 rcrtc->sources = NULL;
812 rcrtc->sources_count = 0;
815 static struct drm_crtc_state *
816 rcar_du_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
818 struct rcar_du_crtc_state *state;
819 struct rcar_du_crtc_state *copy;
821 if (WARN_ON(!crtc->state))
822 return NULL;
824 state = to_rcar_crtc_state(crtc->state);
825 copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
826 if (copy == NULL)
827 return NULL;
829 __drm_atomic_helper_crtc_duplicate_state(crtc, &copy->state);
831 return &copy->state;
/* Release a driver CRTC state: clean the core part, free the wrapper. */
static void rcar_du_crtc_atomic_destroy_state(struct drm_crtc *crtc,
					      struct drm_crtc_state *state)
{
	struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(state);

	__drm_atomic_helper_crtc_destroy_state(state);
	kfree(rstate);
}
/* Gen3 .destroy handler: free CRC sources, then the core CRTC state. */
static void rcar_du_crtc_cleanup(struct drm_crtc *crtc)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);

	rcar_du_crtc_crc_cleanup(rcrtc);
	drm_crtc_cleanup(crtc);
}
850 static void rcar_du_crtc_reset(struct drm_crtc *crtc)
852 struct rcar_du_crtc_state *state;
854 if (crtc->state) {
855 rcar_du_crtc_atomic_destroy_state(crtc, crtc->state);
856 crtc->state = NULL;
859 state = kzalloc(sizeof(*state), GFP_KERNEL);
860 if (state == NULL)
861 return;
863 state->crc.source = VSP1_DU_CRC_NONE;
864 state->crc.index = 0;
866 crtc->state = &state->state;
867 crtc->state->crtc = crtc;
870 static int rcar_du_crtc_enable_vblank(struct drm_crtc *crtc)
872 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
874 rcar_du_crtc_write(rcrtc, DSRCR, DSRCR_VBCL);
875 rcar_du_crtc_set(rcrtc, DIER, DIER_VBE);
876 rcrtc->vblank_enable = true;
878 return 0;
881 static void rcar_du_crtc_disable_vblank(struct drm_crtc *crtc)
883 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
885 rcar_du_crtc_clr(rcrtc, DIER, DIER_VBE);
886 rcrtc->vblank_enable = false;
889 static int rcar_du_crtc_parse_crc_source(struct rcar_du_crtc *rcrtc,
890 const char *source_name,
891 enum vsp1_du_crc_source *source)
893 unsigned int index;
894 int ret;
897 * Parse the source name. Supported values are "plane%u" to compute the
898 * CRC on an input plane (%u is the plane ID), and "auto" to compute the
899 * CRC on the composer (VSP) output.
902 if (!source_name) {
903 *source = VSP1_DU_CRC_NONE;
904 return 0;
905 } else if (!strcmp(source_name, "auto")) {
906 *source = VSP1_DU_CRC_OUTPUT;
907 return 0;
908 } else if (strstarts(source_name, "plane")) {
909 unsigned int i;
911 *source = VSP1_DU_CRC_PLANE;
913 ret = kstrtouint(source_name + strlen("plane"), 10, &index);
914 if (ret < 0)
915 return ret;
917 for (i = 0; i < rcrtc->vsp->num_planes; ++i) {
918 if (index == rcrtc->vsp->planes[i].plane.base.id)
919 return i;
923 return -EINVAL;
926 static int rcar_du_crtc_verify_crc_source(struct drm_crtc *crtc,
927 const char *source_name,
928 size_t *values_cnt)
930 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
931 enum vsp1_du_crc_source source;
933 if (rcar_du_crtc_parse_crc_source(rcrtc, source_name, &source) < 0) {
934 DRM_DEBUG_DRIVER("unknown source %s\n", source_name);
935 return -EINVAL;
938 *values_cnt = 1;
939 return 0;
942 const char *const *rcar_du_crtc_get_crc_sources(struct drm_crtc *crtc,
943 size_t *count)
945 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
947 *count = rcrtc->sources_count;
948 return rcrtc->sources;
951 static int rcar_du_crtc_set_crc_source(struct drm_crtc *crtc,
952 const char *source_name)
954 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
955 struct drm_modeset_acquire_ctx ctx;
956 struct drm_crtc_state *crtc_state;
957 struct drm_atomic_state *state;
958 enum vsp1_du_crc_source source;
959 unsigned int index;
960 int ret;
962 ret = rcar_du_crtc_parse_crc_source(rcrtc, source_name, &source);
963 if (ret < 0)
964 return ret;
966 index = ret;
968 /* Perform an atomic commit to set the CRC source. */
969 drm_modeset_acquire_init(&ctx, 0);
971 state = drm_atomic_state_alloc(crtc->dev);
972 if (!state) {
973 ret = -ENOMEM;
974 goto unlock;
977 state->acquire_ctx = &ctx;
979 retry:
980 crtc_state = drm_atomic_get_crtc_state(state, crtc);
981 if (!IS_ERR(crtc_state)) {
982 struct rcar_du_crtc_state *rcrtc_state;
984 rcrtc_state = to_rcar_crtc_state(crtc_state);
985 rcrtc_state->crc.source = source;
986 rcrtc_state->crc.index = index;
988 ret = drm_atomic_commit(state);
989 } else {
990 ret = PTR_ERR(crtc_state);
993 if (ret == -EDEADLK) {
994 drm_atomic_state_clear(state);
995 drm_modeset_backoff(&ctx);
996 goto retry;
999 drm_atomic_state_put(state);
1001 unlock:
1002 drm_modeset_drop_locks(&ctx);
1003 drm_modeset_acquire_fini(&ctx);
1005 return 0;
/* Gen1/Gen2 CRTC operations: no CRC support, plain core cleanup. */
1008 static const struct drm_crtc_funcs crtc_funcs_gen2 = {
1009 .reset = rcar_du_crtc_reset,
1010 .destroy = drm_crtc_cleanup,
1011 .set_config = drm_atomic_helper_set_config,
1012 .page_flip = drm_atomic_helper_page_flip,
1013 .atomic_duplicate_state = rcar_du_crtc_atomic_duplicate_state,
1014 .atomic_destroy_state = rcar_du_crtc_atomic_destroy_state,
1015 .enable_vblank = rcar_du_crtc_enable_vblank,
1016 .disable_vblank = rcar_du_crtc_disable_vblank,
/* Gen3 CRTC operations: adds CRC debugfs hooks and CRC-aware cleanup. */
1019 static const struct drm_crtc_funcs crtc_funcs_gen3 = {
1020 .reset = rcar_du_crtc_reset,
1021 .destroy = rcar_du_crtc_cleanup,
1022 .set_config = drm_atomic_helper_set_config,
1023 .page_flip = drm_atomic_helper_page_flip,
1024 .atomic_duplicate_state = rcar_du_crtc_atomic_duplicate_state,
1025 .atomic_destroy_state = rcar_du_crtc_atomic_destroy_state,
1026 .enable_vblank = rcar_du_crtc_enable_vblank,
1027 .disable_vblank = rcar_du_crtc_disable_vblank,
1028 .set_crc_source = rcar_du_crtc_set_crc_source,
1029 .verify_crc_source = rcar_du_crtc_verify_crc_source,
1030 .get_crc_sources = rcar_du_crtc_get_crc_sources,
1033 /* -----------------------------------------------------------------------------
1034 * Interrupt Handling
1037 static irqreturn_t rcar_du_crtc_irq(int irq, void *arg)
1039 struct rcar_du_crtc *rcrtc = arg;
1040 struct rcar_du_device *rcdu = rcrtc->group->dev;
1041 irqreturn_t ret = IRQ_NONE;
1042 u32 status;
1044 spin_lock(&rcrtc->vblank_lock);
1046 status = rcar_du_crtc_read(rcrtc, DSSR);
1047 rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK);
1049 if (status & DSSR_VBK) {
1051 * Wake up the vblank wait if the counter reaches 0. This must
1052 * be protected by the vblank_lock to avoid races in
1053 * rcar_du_crtc_disable_planes().
1055 if (rcrtc->vblank_count) {
1056 if (--rcrtc->vblank_count == 0)
1057 wake_up(&rcrtc->vblank_wait);
1061 spin_unlock(&rcrtc->vblank_lock);
1063 if (status & DSSR_VBK) {
1064 if (rcdu->info->gen < 3) {
1065 drm_crtc_handle_vblank(&rcrtc->crtc);
1066 rcar_du_crtc_finish_page_flip(rcrtc);
1069 ret = IRQ_HANDLED;
1072 return ret;
1075 /* -----------------------------------------------------------------------------
1076 * Initialization
1079 int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex,
1080 unsigned int hwindex)
1082 static const unsigned int mmio_offsets[] = {
1083 DU0_REG_OFFSET, DU1_REG_OFFSET, DU2_REG_OFFSET, DU3_REG_OFFSET
1086 struct rcar_du_device *rcdu = rgrp->dev;
1087 struct platform_device *pdev = to_platform_device(rcdu->dev);
1088 struct rcar_du_crtc *rcrtc = &rcdu->crtcs[swindex];
1089 struct drm_crtc *crtc = &rcrtc->crtc;
1090 struct drm_plane *primary;
1091 unsigned int irqflags;
1092 struct clk *clk;
1093 char clk_name[9];
1094 char *name;
1095 int irq;
1096 int ret;
1098 /* Get the CRTC clock and the optional external clock. */
1099 if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ_CLOCK)) {
1100 sprintf(clk_name, "du.%u", hwindex);
1101 name = clk_name;
1102 } else {
1103 name = NULL;
1106 rcrtc->clock = devm_clk_get(rcdu->dev, name);
1107 if (IS_ERR(rcrtc->clock)) {
1108 dev_err(rcdu->dev, "no clock for DU channel %u\n", hwindex);
1109 return PTR_ERR(rcrtc->clock);
1112 sprintf(clk_name, "dclkin.%u", hwindex);
1113 clk = devm_clk_get(rcdu->dev, clk_name);
1114 if (!IS_ERR(clk)) {
1115 rcrtc->extclock = clk;
1116 } else if (PTR_ERR(rcrtc->clock) == -EPROBE_DEFER) {
1117 dev_info(rcdu->dev, "can't get external clock %u\n", hwindex);
1118 return -EPROBE_DEFER;
1121 init_waitqueue_head(&rcrtc->flip_wait);
1122 init_waitqueue_head(&rcrtc->vblank_wait);
1123 spin_lock_init(&rcrtc->vblank_lock);
1125 rcrtc->group = rgrp;
1126 rcrtc->mmio_offset = mmio_offsets[hwindex];
1127 rcrtc->index = hwindex;
1128 rcrtc->dsysr = (rcrtc->index % 2 ? 0 : DSYSR_DRES) | DSYSR_TVM_TVSYNC;
1130 if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE))
1131 primary = &rcrtc->vsp->planes[rcrtc->vsp_pipe].plane;
1132 else
1133 primary = &rgrp->planes[swindex % 2].plane;
1135 ret = drm_crtc_init_with_planes(rcdu->ddev, crtc, primary, NULL,
1136 rcdu->info->gen <= 2 ?
1137 &crtc_funcs_gen2 : &crtc_funcs_gen3,
1138 NULL);
1139 if (ret < 0)
1140 return ret;
1142 drm_crtc_helper_add(crtc, &crtc_helper_funcs);
1144 /* Start with vertical blanking interrupt reporting disabled. */
1145 drm_crtc_vblank_off(crtc);
1147 /* Register the interrupt handler. */
1148 if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ_CLOCK)) {
1149 /* The IRQ's are associated with the CRTC (sw)index. */
1150 irq = platform_get_irq(pdev, swindex);
1151 irqflags = 0;
1152 } else {
1153 irq = platform_get_irq(pdev, 0);
1154 irqflags = IRQF_SHARED;
1157 if (irq < 0) {
1158 dev_err(rcdu->dev, "no IRQ for CRTC %u\n", swindex);
1159 return irq;
1162 ret = devm_request_irq(rcdu->dev, irq, rcar_du_crtc_irq, irqflags,
1163 dev_name(rcdu->dev), rcrtc);
1164 if (ret < 0) {
1165 dev_err(rcdu->dev,
1166 "failed to register IRQ for CRTC %u\n", swindex);
1167 return ret;
1170 rcar_du_crtc_crc_init(rcrtc);
1172 return 0;