/*
 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
 * Author: Mark Yao <mark.yao@rock-chips.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_plane_helper.h>
#ifdef CONFIG_DRM_ANALOGIX_DP
#include <drm/bridge/analogix_dp.h>
#endif

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/iopoll.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/component.h>
#include <linux/reset.h>
#include <linux/delay.h>

#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"
#include "rockchip_drm_fb.h"
#include "rockchip_drm_psr.h"
#include "rockchip_drm_vop.h"

#define VOP_WIN_SET(x, win, name, v) \
		vop_reg_set(vop, &win->phy->name, win->base, ~0, v, #name)
#define VOP_SCL_SET(x, win, name, v) \
		vop_reg_set(vop, &win->phy->scl->name, win->base, ~0, v, #name)
#define VOP_SCL_SET_EXT(x, win, name, v) \
		vop_reg_set(vop, &win->phy->scl->ext->name, \
			    win->base, ~0, v, #name)

#define VOP_INTR_SET_MASK(vop, name, mask, v) \
		vop_reg_set(vop, &vop->data->intr->name, 0, mask, v, #name)

#define VOP_REG_SET(vop, group, name, v) \
		    vop_reg_set(vop, &vop->data->group->name, 0, ~0, v, #name)

#define VOP_INTR_SET_TYPE(vop, name, type, v) \
	do { \
		int i, reg = 0, mask = 0; \
		for (i = 0; i < vop->data->intr->nintrs; i++) { \
			if (vop->data->intr->intrs[i] & type) { \
				reg |= (v) << i; \
				mask |= 1 << i; \
			} \
		} \
		VOP_INTR_SET_MASK(vop, name, mask, reg); \
	} while (0)
#define VOP_INTR_GET_TYPE(vop, name, type) \
		vop_get_intr_type(vop, &vop->data->intr->name, type)

#define VOP_WIN_GET(x, win, name) \
		vop_read_reg(x, win->offset, win->phy->name)

#define VOP_WIN_GET_YRGBADDR(vop, win) \
		vop_readl(vop, win->base + win->phy->yrgb_mst.offset)

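/*
 * Illustrative expansion (not part of the original source): with the
 * definitions above,
 *
 *	VOP_WIN_SET(vop, win, enable, 1);
 *
 * expands to
 *
 *	vop_reg_set(vop, &win->phy->enable, win->base, ~0, 1, "enable");
 *
 * i.e. the register description comes from the per-SoC vop_win_data/vop_data
 * tables, the window's register block offset is added inside vop_reg_set(),
 * and the stringified field name is only used for the debug message.  Note
 * that these macros use the caller's local "vop"/"win" variables rather than
 * their first argument.
 */
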
#define to_vop(x) container_of(x, struct vop, crtc)
#define to_vop_win(x) container_of(x, struct vop_win, base)

enum vop_pending {
	VOP_PENDING_FB_UNREF,
};

struct vop_win {
	struct drm_plane base;
	const struct vop_win_data *data;
	struct vop *vop;
};

struct vop {
	struct drm_crtc crtc;
	struct device *dev;
	struct drm_device *drm_dev;
	bool is_enabled;

	/* mutex for the vsync work */
	struct mutex vsync_mutex;
	bool vsync_work_pending;
	struct completion dsp_hold_completion;

	/* protected by dev->event_lock */
	struct drm_pending_vblank_event *event;

	struct drm_flip_work fb_unref_work;
	unsigned long pending;

	struct completion line_flag_completion;

	const struct vop_data *data;

	uint32_t *regsbak;
	void __iomem *regs;

	/* physical map length of vop register */
	uint32_t len;

	/* only one process at a time is allowed to config the register */
	spinlock_t reg_lock;
	/* lock vop irq reg */
	spinlock_t irq_lock;

	unsigned int irq;

	/* vop AHB clk */
	struct clk *hclk;
	/* vop dclk */
	struct clk *dclk;
	/* vop share memory frequency */
	struct clk *aclk;

	/* vop dclk reset */
	struct reset_control *dclk_rst;

	struct vop_win win[];
};

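/*
 * Locking note (summarising the comments above and the users below):
 * reg_lock serialises register/regsbak updates done from the atomic commit
 * paths, while irq_lock protects the interrupt status/enable/clear registers
 * that are touched both by vop_isr() and by the enable/disable_vblank and
 * line-flag helpers.
 */
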
static inline void vop_writel(struct vop *vop, uint32_t offset, uint32_t v)
{
	writel(v, vop->regs + offset);
	vop->regsbak[offset >> 2] = v;
}

static inline uint32_t vop_readl(struct vop *vop, uint32_t offset)
{
	return readl(vop->regs + offset);
}

static inline uint32_t vop_read_reg(struct vop *vop, uint32_t base,
				    const struct vop_reg *reg)
{
	return (vop_readl(vop, base + reg->offset) >> reg->shift) & reg->mask;
}

static void vop_reg_set(struct vop *vop, const struct vop_reg *reg,
			uint32_t _offset, uint32_t _mask, uint32_t v,
			const char *reg_name)
{
	int offset, mask, shift;

	if (!reg || !reg->mask) {
		DRM_DEV_DEBUG(vop->dev, "Warning: not support %s\n", reg_name);
		return;
	}

	offset = reg->offset + _offset;
	mask = reg->mask & _mask;
	shift = reg->shift;

	if (reg->write_mask) {
		v = ((v << shift) & 0xffff) | (mask << (shift + 16));
	} else {
		uint32_t cached_val = vop->regsbak[offset >> 2];

		v = (cached_val & ~(mask << shift)) | ((v & mask) << shift);
		vop->regsbak[offset >> 2] = v;
	}

	if (reg->relaxed)
		writel_relaxed(v, vop->regs + offset);
	else
		writel(v, vop->regs + offset);
}

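/*
 * How vop_reg_set() writes a field, in short: for normal registers the
 * current value is taken from the regsbak[] shadow copy, the field is
 * masked in at reg->shift, and the shadow copy is updated; for registers
 * with a hardware write-enable mask (reg->write_mask) the value goes in the
 * low half-word and the mask in the high half-word, so no read-modify-write
 * is needed.  Illustrative example: writing value 0x3 to a 2-bit field at
 * shift 4 whose cached word is 0x0000ff00 results in
 * (0xff00 & ~0x30) | 0x30 = 0xff30 being written and cached.
 */
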
static inline uint32_t vop_get_intr_type(struct vop *vop,
					 const struct vop_reg *reg, int type)
{
	uint32_t ret = 0;
	uint32_t regs = vop_read_reg(vop, 0, reg);
	int i;

	for (i = 0; i < vop->data->intr->nintrs; i++) {
		if ((type & vop->data->intr->intrs[i]) && (regs & 1 << i))
			ret |= vop->data->intr->intrs[i];
	}

	return ret;
}

static inline void vop_cfg_done(struct vop *vop)
{
	VOP_REG_SET(vop, common, cfg_done, 1);
}

static bool has_rb_swapped(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_BGR888:
	case DRM_FORMAT_BGR565:
		return true;
	default:
		return false;
	}
}

static enum vop_data_format vop_convert_format(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		return VOP_FMT_ARGB8888;
	case DRM_FORMAT_RGB888:
	case DRM_FORMAT_BGR888:
		return VOP_FMT_RGB888;
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_BGR565:
		return VOP_FMT_RGB565;
	case DRM_FORMAT_NV12:
		return VOP_FMT_YUV420SP;
	case DRM_FORMAT_NV16:
		return VOP_FMT_YUV422SP;
	case DRM_FORMAT_NV24:
		return VOP_FMT_YUV444SP;
	default:
		DRM_ERROR("unsupported format[%08x]\n", format);
		return -EINVAL;
	}
}

static bool is_yuv_support(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV16:
	case DRM_FORMAT_NV24:
		return true;
	default:
		return false;
	}
}

static bool is_alpha_support(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_ABGR8888:
		return true;
	default:
		return false;
	}
}

static uint16_t scl_vop_cal_scale(enum scale_mode mode, uint32_t src,
				  uint32_t dst, bool is_horizontal,
				  int vsu_mode, int *vskiplines)
{
	uint16_t val = 1 << SCL_FT_DEFAULT_FIXPOINT_SHIFT;

	if (vskiplines)
		*vskiplines = 0;

	if (is_horizontal) {
		if (mode == SCALE_UP)
			val = GET_SCL_FT_BIC(src, dst);
		else if (mode == SCALE_DOWN)
			val = GET_SCL_FT_BILI_DN(src, dst);
	} else {
		if (mode == SCALE_UP) {
			if (vsu_mode == SCALE_UP_BIL)
				val = GET_SCL_FT_BILI_UP(src, dst);
			else
				val = GET_SCL_FT_BIC(src, dst);
		} else if (mode == SCALE_DOWN) {
			if (vskiplines) {
				*vskiplines = scl_get_vskiplines(src, dst);
				val = scl_get_bili_dn_vskip(src, dst,
							    *vskiplines);
			} else {
				val = GET_SCL_FT_BILI_DN(src, dst);
			}
		}
	}

	return val;
}

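/*
 * Worked example (illustrative, assuming the usual src/dst ratio behind the
 * GET_SCL_FT_* macros): bilinearly downscaling a 1920 pixel wide source to
 * 1280 pixels gives a horizontal factor of about 1920/1280 = 1.5 in the
 * hardware's fixed-point format.  For vertical downscaling the helper can
 * also report vskiplines (the gt2/gt4 cases programmed by the caller below),
 * which lets the scaler skip source lines instead of blending all of them.
 */
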
static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
			     uint32_t src_w, uint32_t src_h, uint32_t dst_w,
			     uint32_t dst_h, uint32_t pixel_format)
{
	uint16_t yrgb_hor_scl_mode, yrgb_ver_scl_mode;
	uint16_t cbcr_hor_scl_mode = SCALE_NONE;
	uint16_t cbcr_ver_scl_mode = SCALE_NONE;
	int hsub = drm_format_horz_chroma_subsampling(pixel_format);
	int vsub = drm_format_vert_chroma_subsampling(pixel_format);
	bool is_yuv = is_yuv_support(pixel_format);
	uint16_t cbcr_src_w = src_w / hsub;
	uint16_t cbcr_src_h = src_h / vsub;
	uint16_t vsu_mode;
	uint16_t lb_mode;
	uint32_t val;
	int vskiplines = 0;

	if (dst_w > 3840) {
		DRM_DEV_ERROR(vop->dev, "Maximum dst width (3840) exceeded\n");
		return;
	}

	if (!win->phy->scl->ext) {
		VOP_SCL_SET(vop, win, scale_yrgb_x,
			    scl_cal_scale2(src_w, dst_w));
		VOP_SCL_SET(vop, win, scale_yrgb_y,
			    scl_cal_scale2(src_h, dst_h));
		if (is_yuv) {
			VOP_SCL_SET(vop, win, scale_cbcr_x,
				    scl_cal_scale2(cbcr_src_w, dst_w));
			VOP_SCL_SET(vop, win, scale_cbcr_y,
				    scl_cal_scale2(cbcr_src_h, dst_h));
		}
		return;
	}

	yrgb_hor_scl_mode = scl_get_scl_mode(src_w, dst_w);
	yrgb_ver_scl_mode = scl_get_scl_mode(src_h, dst_h);

	if (is_yuv) {
		cbcr_hor_scl_mode = scl_get_scl_mode(cbcr_src_w, dst_w);
		cbcr_ver_scl_mode = scl_get_scl_mode(cbcr_src_h, dst_h);
		if (cbcr_hor_scl_mode == SCALE_DOWN)
			lb_mode = scl_vop_cal_lb_mode(dst_w, true);
		else
			lb_mode = scl_vop_cal_lb_mode(cbcr_src_w, true);
	} else {
		if (yrgb_hor_scl_mode == SCALE_DOWN)
			lb_mode = scl_vop_cal_lb_mode(dst_w, false);
		else
			lb_mode = scl_vop_cal_lb_mode(src_w, false);
	}

	VOP_SCL_SET_EXT(vop, win, lb_mode, lb_mode);
	if (lb_mode == LB_RGB_3840X2) {
		if (yrgb_ver_scl_mode != SCALE_NONE) {
			DRM_DEV_ERROR(vop->dev, "not allow yrgb ver scale\n");
			return;
		}
		if (cbcr_ver_scl_mode != SCALE_NONE) {
			DRM_DEV_ERROR(vop->dev, "not allow cbcr ver scale\n");
			return;
		}
		vsu_mode = SCALE_UP_BIL;
	} else if (lb_mode == LB_RGB_2560X4) {
		vsu_mode = SCALE_UP_BIL;
	} else {
		vsu_mode = SCALE_UP_BIC;
	}

	val = scl_vop_cal_scale(yrgb_hor_scl_mode, src_w, dst_w,
				true, 0, NULL);
	VOP_SCL_SET(vop, win, scale_yrgb_x, val);
	val = scl_vop_cal_scale(yrgb_ver_scl_mode, src_h, dst_h,
				false, vsu_mode, &vskiplines);
	VOP_SCL_SET(vop, win, scale_yrgb_y, val);

	VOP_SCL_SET_EXT(vop, win, vsd_yrgb_gt4, vskiplines == 4);
	VOP_SCL_SET_EXT(vop, win, vsd_yrgb_gt2, vskiplines == 2);

	VOP_SCL_SET_EXT(vop, win, yrgb_hor_scl_mode, yrgb_hor_scl_mode);
	VOP_SCL_SET_EXT(vop, win, yrgb_ver_scl_mode, yrgb_ver_scl_mode);
	VOP_SCL_SET_EXT(vop, win, yrgb_hsd_mode, SCALE_DOWN_BIL);
	VOP_SCL_SET_EXT(vop, win, yrgb_vsd_mode, SCALE_DOWN_BIL);
	VOP_SCL_SET_EXT(vop, win, yrgb_vsu_mode, vsu_mode);
	if (is_yuv) {
		val = scl_vop_cal_scale(cbcr_hor_scl_mode, cbcr_src_w,
					dst_w, true, 0, NULL);
		VOP_SCL_SET(vop, win, scale_cbcr_x, val);
		val = scl_vop_cal_scale(cbcr_ver_scl_mode, cbcr_src_h,
					dst_h, false, vsu_mode, &vskiplines);
		VOP_SCL_SET(vop, win, scale_cbcr_y, val);

		VOP_SCL_SET_EXT(vop, win, vsd_cbcr_gt4, vskiplines == 4);
		VOP_SCL_SET_EXT(vop, win, vsd_cbcr_gt2, vskiplines == 2);
		VOP_SCL_SET_EXT(vop, win, cbcr_hor_scl_mode, cbcr_hor_scl_mode);
		VOP_SCL_SET_EXT(vop, win, cbcr_ver_scl_mode, cbcr_ver_scl_mode);
		VOP_SCL_SET_EXT(vop, win, cbcr_hsd_mode, SCALE_DOWN_BIL);
		VOP_SCL_SET_EXT(vop, win, cbcr_vsd_mode, SCALE_DOWN_BIL);
		VOP_SCL_SET_EXT(vop, win, cbcr_vsu_mode, vsu_mode);
	}
}

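/*
 * Note on the function above: the line-buffer mode is picked from the widest
 * plane the buffer has to hold (cbcr for YUV, yrgb otherwise), and in the
 * wide LB_RGB_3840X2/LB_RGB_2560X4 configurations only bilinear vertical
 * upscaling is used; 3840x2 additionally forbids any vertical scaling, which
 * is what the two error returns above enforce.
 */
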
static void vop_dsp_hold_valid_irq_enable(struct vop *vop)
{
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, clear, DSP_HOLD_VALID_INTR, 1);
	VOP_INTR_SET_TYPE(vop, enable, DSP_HOLD_VALID_INTR, 1);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}

static void vop_dsp_hold_valid_irq_disable(struct vop *vop)
{
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, enable, DSP_HOLD_VALID_INTR, 0);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}

/*
 * (1) each frame starts at the start of the Vsync pulse which is signaled by
 *     the "FRAME_SYNC" interrupt.
 * (2) the active data region of each frame ends at dsp_vact_end
 * (3) we should program this same number (dsp_vact_end) into
 *     dsp_line_frag_num, to get the "LINE_FLAG" interrupt at the end of the
 *     active on-screen data.
 *
 * VOP_INTR_CTRL0.dsp_line_frag_num = VOP_DSP_VACT_ST_END.dsp_vact_end
 * Interrupts
 * LINE_FLAG -------------------------------+
 * FRAME_SYNC ----+                         |
 *                |                         |
 *                v                         v
 *                | Vsync | Vbp |  Vactive  | Vfp |
 *                        ^     ^           ^     ^
 *                        |     |           |     |
 *                        |     |           |     |
 * dsp_vs_end ------------+     |           |     |     VOP_DSP_VTOTAL_VS_END
 * dsp_vact_start --------------+           |     |     VOP_DSP_VACT_ST_END
 * dsp_vact_end ----------------------------+     |     VOP_DSP_VACT_ST_END
 * dsp_total -------------------------------------+     VOP_DSP_VTOTAL_VS_END
 */
static bool vop_line_flag_irq_is_enabled(struct vop *vop)
{
	uint32_t line_flag_irq;
	unsigned long flags;

	spin_lock_irqsave(&vop->irq_lock, flags);

	line_flag_irq = VOP_INTR_GET_TYPE(vop, enable, LINE_FLAG_INTR);

	spin_unlock_irqrestore(&vop->irq_lock, flags);

	return !!line_flag_irq;
}

static void vop_line_flag_irq_enable(struct vop *vop)
{
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, clear, LINE_FLAG_INTR, 1);
	VOP_INTR_SET_TYPE(vop, enable, LINE_FLAG_INTR, 1);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}

static void vop_line_flag_irq_disable(struct vop *vop)
{
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, enable, LINE_FLAG_INTR, 0);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}

static int vop_enable(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	int ret, i;

	ret = pm_runtime_get_sync(vop->dev);
	if (ret < 0) {
		DRM_DEV_ERROR(vop->dev, "failed to get pm runtime: %d\n", ret);
		return ret;
	}

	ret = clk_enable(vop->hclk);
	if (WARN_ON(ret < 0))
		goto err_put_pm_runtime;

	ret = clk_enable(vop->dclk);
	if (WARN_ON(ret < 0))
		goto err_disable_hclk;

	ret = clk_enable(vop->aclk);
	if (WARN_ON(ret < 0))
		goto err_disable_dclk;

	/*
	 * Slave iommu shares power, irq and clock with vop.  It was associated
	 * automatically with this master device via common driver code.
	 * Now that we have enabled the clock we attach it to the shared drm
	 * mapping.
	 */
	ret = rockchip_drm_dma_attach_device(vop->drm_dev, vop->dev);
	if (ret) {
		DRM_DEV_ERROR(vop->dev,
			      "failed to attach dma mapping, %d\n", ret);
		goto err_disable_aclk;
	}

	memcpy(vop->regs, vop->regsbak, vop->len);
	/*
	 * We need to make sure that all windows are disabled before we
	 * enable the crtc. Otherwise we might try to scan from a destroyed
	 * buffer later.
	 */
	for (i = 0; i < vop->data->win_size; i++) {
		struct vop_win *vop_win = &vop->win[i];
		const struct vop_win_data *win = vop_win->data;

		spin_lock(&vop->reg_lock);
		VOP_WIN_SET(vop, win, enable, 0);
		spin_unlock(&vop->reg_lock);
	}

	vop_cfg_done(vop);

	/*
	 * At this point the vop clock and iommu are enabled, so reading and
	 * writing vop registers is safe.
	 */
	vop->is_enabled = true;

	spin_lock(&vop->reg_lock);

	VOP_REG_SET(vop, common, standby, 1);

	spin_unlock(&vop->reg_lock);

	enable_irq(vop->irq);

	drm_crtc_vblank_on(crtc);

	return 0;

err_disable_aclk:
	clk_disable(vop->aclk);
err_disable_dclk:
	clk_disable(vop->dclk);
err_disable_hclk:
	clk_disable(vop->hclk);
err_put_pm_runtime:
	pm_runtime_put_sync(vop->dev);
	return ret;
}

static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
				    struct drm_crtc_state *old_state)
{
	struct vop *vop = to_vop(crtc);

	WARN_ON(vop->event);

	rockchip_drm_psr_deactivate(&vop->crtc);

	drm_crtc_vblank_off(crtc);

	/*
	 * Vop standby takes effect at the end of the current frame; when the
	 * dsp hold valid irq fires, standby is complete.
	 *
	 * We must wait for standby to complete before disabling aclk,
	 * otherwise the memory bus may hang.
	 */
	reinit_completion(&vop->dsp_hold_completion);
	vop_dsp_hold_valid_irq_enable(vop);

	spin_lock(&vop->reg_lock);

	VOP_REG_SET(vop, common, standby, 1);

	spin_unlock(&vop->reg_lock);

	wait_for_completion(&vop->dsp_hold_completion);

	vop_dsp_hold_valid_irq_disable(vop);

	disable_irq(vop->irq);

	vop->is_enabled = false;

	/*
	 * vop standby is complete, so the iommu detach is safe.
	 */
	rockchip_drm_dma_detach_device(vop->drm_dev, vop->dev);

	clk_disable(vop->dclk);
	clk_disable(vop->aclk);
	clk_disable(vop->hclk);
	pm_runtime_put(vop->dev);

	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irq(&crtc->dev->event_lock);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		spin_unlock_irq(&crtc->dev->event_lock);

		crtc->state->event = NULL;
	}
}

static void vop_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
}

static int vop_plane_atomic_check(struct drm_plane *plane,
			   struct drm_plane_state *state)
{
	struct drm_crtc *crtc = state->crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_framebuffer *fb = state->fb;
	struct vop_win *vop_win = to_vop_win(plane);
	const struct vop_win_data *win = vop_win->data;
	int ret;
	struct drm_rect clip;
	int min_scale = win->phy->scl ? FRAC_16_16(1, 8) :
					DRM_PLANE_HELPER_NO_SCALING;
	int max_scale = win->phy->scl ? FRAC_16_16(8, 1) :
					DRM_PLANE_HELPER_NO_SCALING;

	if (!crtc || !fb)
		return 0;

	crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
	if (WARN_ON(!crtc_state))
		return -EINVAL;

	clip.x1 = 0;
	clip.y1 = 0;
	clip.x2 = crtc_state->adjusted_mode.hdisplay;
	clip.y2 = crtc_state->adjusted_mode.vdisplay;

	ret = drm_atomic_helper_check_plane_state(state, crtc_state, &clip,
						  min_scale, max_scale,
						  true, true);
	if (ret)
		return ret;

	if (!state->visible)
		return 0;

	ret = vop_convert_format(fb->format->format);
	if (ret < 0)
		return ret;

	/*
	 * src.x1 can become odd after clipping, but the start point of a YUV
	 * plane must be aligned to 2 pixels.
	 */
	if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2)) {
		DRM_ERROR("Invalid Source: Yuv format not support odd xpos\n");
		return -EINVAL;
	}

	return 0;
}

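/*
 * The FRAC_16_16(1, 8)..FRAC_16_16(8, 1) window above is the src/dst ratio
 * range passed to drm_atomic_helper_check_plane_state(): windows with a
 * scaler may be upscaled or downscaled by at most a factor of 8, while
 * windows without one must be displayed 1:1.
 */
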
static void vop_plane_atomic_disable(struct drm_plane *plane,
				     struct drm_plane_state *old_state)
{
	struct vop_win *vop_win = to_vop_win(plane);
	const struct vop_win_data *win = vop_win->data;
	struct vop *vop = to_vop(old_state->crtc);

	if (!old_state->crtc)
		return;

	spin_lock(&vop->reg_lock);

	VOP_WIN_SET(vop, win, enable, 0);

	spin_unlock(&vop->reg_lock);
}

static void vop_plane_atomic_update(struct drm_plane *plane,
				    struct drm_plane_state *old_state)
{
	struct drm_plane_state *state = plane->state;
	struct drm_crtc *crtc = state->crtc;
	struct vop_win *vop_win = to_vop_win(plane);
	const struct vop_win_data *win = vop_win->data;
	struct vop *vop = to_vop(state->crtc);
	struct drm_framebuffer *fb = state->fb;
	unsigned int actual_w, actual_h;
	unsigned int dsp_stx, dsp_sty;
	uint32_t act_info, dsp_info, dsp_st;
	struct drm_rect *src = &state->src;
	struct drm_rect *dest = &state->dst;
	struct drm_gem_object *obj, *uv_obj;
	struct rockchip_gem_object *rk_obj, *rk_uv_obj;
	unsigned long offset;
	dma_addr_t dma_addr;
	uint32_t val;
	bool rb_swap;
	int format;

	/*
	 * We can't update the plane while the vop is disabled.
	 */
	if (WARN_ON(!crtc))
		return;

	if (WARN_ON(!vop->is_enabled))
		return;

	if (!state->visible) {
		vop_plane_atomic_disable(plane, old_state);
		return;
	}

	obj = rockchip_fb_get_gem_obj(fb, 0);
	rk_obj = to_rockchip_obj(obj);

	actual_w = drm_rect_width(src) >> 16;
	actual_h = drm_rect_height(src) >> 16;
	act_info = (actual_h - 1) << 16 | ((actual_w - 1) & 0xffff);

	dsp_info = (drm_rect_height(dest) - 1) << 16;
	dsp_info |= (drm_rect_width(dest) - 1) & 0xffff;

	dsp_stx = dest->x1 + crtc->mode.htotal - crtc->mode.hsync_start;
	dsp_sty = dest->y1 + crtc->mode.vtotal - crtc->mode.vsync_start;
	dsp_st = dsp_sty << 16 | (dsp_stx & 0xffff);

	offset = (src->x1 >> 16) * fb->format->cpp[0];
	offset += (src->y1 >> 16) * fb->pitches[0];
	dma_addr = rk_obj->dma_addr + offset + fb->offsets[0];

	format = vop_convert_format(fb->format->format);

	spin_lock(&vop->reg_lock);

	VOP_WIN_SET(vop, win, format, format);
	VOP_WIN_SET(vop, win, yrgb_vir, DIV_ROUND_UP(fb->pitches[0], 4));
	VOP_WIN_SET(vop, win, yrgb_mst, dma_addr);
	if (is_yuv_support(fb->format->format)) {
		int hsub = drm_format_horz_chroma_subsampling(fb->format->format);
		int vsub = drm_format_vert_chroma_subsampling(fb->format->format);
		int bpp = fb->format->cpp[1];

		uv_obj = rockchip_fb_get_gem_obj(fb, 1);
		rk_uv_obj = to_rockchip_obj(uv_obj);

		offset = (src->x1 >> 16) * bpp / hsub;
		offset += (src->y1 >> 16) * fb->pitches[1] / vsub;

		dma_addr = rk_uv_obj->dma_addr + offset + fb->offsets[1];
		VOP_WIN_SET(vop, win, uv_vir, DIV_ROUND_UP(fb->pitches[1], 4));
		VOP_WIN_SET(vop, win, uv_mst, dma_addr);
	}

	if (win->phy->scl)
		scl_vop_cal_scl_fac(vop, win, actual_w, actual_h,
				    drm_rect_width(dest), drm_rect_height(dest),
				    fb->format->format);

	VOP_WIN_SET(vop, win, act_info, act_info);
	VOP_WIN_SET(vop, win, dsp_info, dsp_info);
	VOP_WIN_SET(vop, win, dsp_st, dsp_st);

	rb_swap = has_rb_swapped(fb->format->format);
	VOP_WIN_SET(vop, win, rb_swap, rb_swap);

	if (is_alpha_support(fb->format->format)) {
		VOP_WIN_SET(vop, win, dst_alpha_ctl,
			    DST_FACTOR_M0(ALPHA_SRC_INVERSE));
		val = SRC_ALPHA_EN(1) | SRC_COLOR_M0(ALPHA_SRC_PRE_MUL) |
			SRC_ALPHA_M0(ALPHA_STRAIGHT) |
			SRC_BLEND_M0(ALPHA_PER_PIX) |
			SRC_ALPHA_CAL_M0(ALPHA_NO_SATURATION) |
			SRC_FACTOR_M0(ALPHA_ONE);
		VOP_WIN_SET(vop, win, src_alpha_ctl, val);
	} else {
		VOP_WIN_SET(vop, win, src_alpha_ctl, SRC_ALPHA_EN(0));
	}

	VOP_WIN_SET(vop, win, enable, 1);
	spin_unlock(&vop->reg_lock);
}

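/*
 * Illustrative example for the dsp_st calculation above: the hardware wants
 * the window position relative to the start of the horizontal/vertical sync
 * pulse, so dest->x1 is offset by (htotal - hsync_start), i.e. the sync plus
 * back porch length.  For a 1920x1080 mode with htotal = 2200 and
 * hsync_start = 2008, a plane at x1 = 0 is programmed with dsp_stx = 192.
 */
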
static const struct drm_plane_helper_funcs plane_helper_funcs = {
	.atomic_check = vop_plane_atomic_check,
	.atomic_update = vop_plane_atomic_update,
	.atomic_disable = vop_plane_atomic_disable,
};

static const struct drm_plane_funcs vop_plane_funcs = {
	.update_plane	= drm_atomic_helper_update_plane,
	.disable_plane	= drm_atomic_helper_disable_plane,
	.destroy = vop_plane_destroy,
	.reset = drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};

static int vop_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return -EPERM;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, clear, FS_INTR, 1);
	VOP_INTR_SET_TYPE(vop, enable, FS_INTR, 1);

	spin_unlock_irqrestore(&vop->irq_lock, flags);

	return 0;
}

static void vop_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, enable, FS_INTR, 0);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}

static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
				const struct drm_display_mode *mode,
				struct drm_display_mode *adjusted_mode)
{
	struct vop *vop = to_vop(crtc);

	adjusted_mode->clock =
		clk_round_rate(vop->dclk, mode->clock * 1000) / 1000;

	return true;
}

static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
				   struct drm_crtc_state *old_state)
{
	struct vop *vop = to_vop(crtc);
	const struct vop_data *vop_data = vop->data;
	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc->state);
	struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
	u16 hsync_len = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
	u16 hdisplay = adjusted_mode->hdisplay;
	u16 htotal = adjusted_mode->htotal;
	u16 hact_st = adjusted_mode->htotal - adjusted_mode->hsync_start;
	u16 hact_end = hact_st + hdisplay;
	u16 vdisplay = adjusted_mode->vdisplay;
	u16 vtotal = adjusted_mode->vtotal;
	u16 vsync_len = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
	u16 vact_st = adjusted_mode->vtotal - adjusted_mode->vsync_start;
	u16 vact_end = vact_st + vdisplay;
	uint32_t pin_pol, val;
	int ret;

	WARN_ON(vop->event);

	ret = vop_enable(crtc);
	if (ret) {
		DRM_DEV_ERROR(vop->dev, "Failed to enable vop (%d)\n", ret);
		return;
	}

	pin_pol = BIT(DCLK_INVERT);
	pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) ?
		   BIT(HSYNC_POSITIVE) : 0;
	pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) ?
		   BIT(VSYNC_POSITIVE) : 0;
	VOP_REG_SET(vop, output, pin_pol, pin_pol);

	switch (s->output_type) {
	case DRM_MODE_CONNECTOR_LVDS:
		VOP_REG_SET(vop, output, rgb_en, 1);
		VOP_REG_SET(vop, output, rgb_pin_pol, pin_pol);
		break;
	case DRM_MODE_CONNECTOR_eDP:
		VOP_REG_SET(vop, output, edp_pin_pol, pin_pol);
		VOP_REG_SET(vop, output, edp_en, 1);
		break;
	case DRM_MODE_CONNECTOR_HDMIA:
		VOP_REG_SET(vop, output, hdmi_pin_pol, pin_pol);
		VOP_REG_SET(vop, output, hdmi_en, 1);
		break;
	case DRM_MODE_CONNECTOR_DSI:
		VOP_REG_SET(vop, output, mipi_pin_pol, pin_pol);
		VOP_REG_SET(vop, output, mipi_en, 1);
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		pin_pol &= ~BIT(DCLK_INVERT);
		VOP_REG_SET(vop, output, dp_pin_pol, pin_pol);
		VOP_REG_SET(vop, output, dp_en, 1);
		break;
	default:
		DRM_DEV_ERROR(vop->dev, "unsupported connector_type [%d]\n",
			      s->output_type);
	}

	/*
	 * If the vop does not support RGB10 output, force RGB10 down to
	 * RGB888.
	 */
	if (s->output_mode == ROCKCHIP_OUT_MODE_AAAA &&
	    !(vop_data->feature & VOP_FEATURE_OUTPUT_RGB10))
		s->output_mode = ROCKCHIP_OUT_MODE_P888;
	VOP_REG_SET(vop, common, out_mode, s->output_mode);

	VOP_REG_SET(vop, modeset, htotal_pw, (htotal << 16) | hsync_len);
	val = hact_st << 16;
	val |= hact_end;
	VOP_REG_SET(vop, modeset, hact_st_end, val);
	VOP_REG_SET(vop, modeset, hpost_st_end, val);

	VOP_REG_SET(vop, modeset, vtotal_pw, (vtotal << 16) | vsync_len);
	val = vact_st << 16;
	val |= vact_end;
	VOP_REG_SET(vop, modeset, vact_st_end, val);
	VOP_REG_SET(vop, modeset, vpost_st_end, val);

	VOP_REG_SET(vop, intr, line_flag_num[0], vact_end);

	clk_set_rate(vop->dclk, adjusted_mode->clock * 1000);

	VOP_REG_SET(vop, common, standby, 0);

	rockchip_drm_psr_activate(&vop->crtc);
}

static bool vop_fs_irq_is_pending(struct vop *vop)
{
	return VOP_INTR_GET_TYPE(vop, status, FS_INTR);
}

static void vop_wait_for_irq_handler(struct vop *vop)
{
	bool pending;
	int ret;

	/*
	 * Spin until frame start interrupt status bit goes low, which means
	 * that interrupt handler was invoked and cleared it. The timeout of
	 * 10 msecs is really too long, but it is just a safety measure if
	 * something goes really wrong. The wait will only happen in the very
	 * unlikely case of a vblank happening exactly at the same time and
	 * shouldn't exceed microseconds range.
	 */
	ret = readx_poll_timeout_atomic(vop_fs_irq_is_pending, vop, pending,
					!pending, 0, 10 * 1000);
	if (ret)
		DRM_DEV_ERROR(vop->dev, "VOP vblank IRQ stuck for 10 ms\n");

	synchronize_irq(vop->irq);
}

static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
				  struct drm_crtc_state *old_crtc_state)
{
	struct drm_atomic_state *old_state = old_crtc_state->state;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct vop *vop = to_vop(crtc);
	struct drm_plane *plane;
	int i;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock(&vop->reg_lock);

	vop_cfg_done(vop);

	spin_unlock(&vop->reg_lock);

	/*
	 * There is a (rather unlikely) possibility that a vblank interrupt
	 * fired before we set the cfg_done bit. To avoid spuriously
	 * signalling flip completion we need to wait for it to finish.
	 */
	vop_wait_for_irq_handler(vop);

	spin_lock_irq(&crtc->dev->event_lock);
	if (crtc->state->event) {
		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
		WARN_ON(vop->event);

		vop->event = crtc->state->event;
		crtc->state->event = NULL;
	}
	spin_unlock_irq(&crtc->dev->event_lock);

	for_each_oldnew_plane_in_state(old_state, plane, old_plane_state,
				       new_plane_state, i) {
		if (!old_plane_state->fb)
			continue;

		if (old_plane_state->fb == new_plane_state->fb)
			continue;

		drm_framebuffer_get(old_plane_state->fb);
		drm_flip_work_queue(&vop->fb_unref_work, old_plane_state->fb);
		set_bit(VOP_PENDING_FB_UNREF, &vop->pending);
		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
	}
}

static void vop_crtc_atomic_begin(struct drm_crtc *crtc,
				  struct drm_crtc_state *old_crtc_state)
{
	rockchip_drm_psr_flush(crtc);
}

static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = {
	.mode_fixup = vop_crtc_mode_fixup,
	.atomic_flush = vop_crtc_atomic_flush,
	.atomic_begin = vop_crtc_atomic_begin,
	.atomic_enable = vop_crtc_atomic_enable,
	.atomic_disable = vop_crtc_atomic_disable,
};

static void vop_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
}

static void vop_crtc_reset(struct drm_crtc *crtc)
{
	if (crtc->state)
		__drm_atomic_helper_crtc_destroy_state(crtc->state);
	kfree(crtc->state);

	crtc->state = kzalloc(sizeof(struct rockchip_crtc_state), GFP_KERNEL);
	if (crtc->state)
		crtc->state->crtc = crtc;
}

static struct drm_crtc_state *vop_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct rockchip_crtc_state *rockchip_state;

	rockchip_state = kzalloc(sizeof(*rockchip_state), GFP_KERNEL);
	if (!rockchip_state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &rockchip_state->base);
	return &rockchip_state->base;
}

static void vop_crtc_destroy_state(struct drm_crtc *crtc,
				   struct drm_crtc_state *state)
{
	struct rockchip_crtc_state *s = to_rockchip_crtc_state(state);

	__drm_atomic_helper_crtc_destroy_state(&s->base);
	kfree(s);
}

#ifdef CONFIG_DRM_ANALOGIX_DP
static struct drm_connector *vop_get_edp_connector(struct vop *vop)
{
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(vop->drm_dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
			drm_connector_list_iter_end(&conn_iter);
			return connector;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	return NULL;
}

static int vop_crtc_set_crc_source(struct drm_crtc *crtc,
				   const char *source_name, size_t *values_cnt)
{
	struct vop *vop = to_vop(crtc);
	struct drm_connector *connector;
	int ret;

	connector = vop_get_edp_connector(vop);
	if (!connector)
		return -EINVAL;

	*values_cnt = 3;

	if (source_name && strcmp(source_name, "auto") == 0)
		ret = analogix_dp_start_crc(connector);
	else if (!source_name)
		ret = analogix_dp_stop_crc(connector);
	else
		ret = -EINVAL;

	return ret;
}
#else
static int vop_crtc_set_crc_source(struct drm_crtc *crtc,
				   const char *source_name, size_t *values_cnt)
{
	return -ENODEV;
}
#endif

static const struct drm_crtc_funcs vop_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.destroy = vop_crtc_destroy,
	.reset = vop_crtc_reset,
	.atomic_duplicate_state = vop_crtc_duplicate_state,
	.atomic_destroy_state = vop_crtc_destroy_state,
	.enable_vblank = vop_crtc_enable_vblank,
	.disable_vblank = vop_crtc_disable_vblank,
	.set_crc_source = vop_crtc_set_crc_source,
};

static void vop_fb_unref_worker(struct drm_flip_work *work, void *val)
{
	struct vop *vop = container_of(work, struct vop, fb_unref_work);
	struct drm_framebuffer *fb = val;

	drm_crtc_vblank_put(&vop->crtc);
	drm_framebuffer_put(fb);
}

static void vop_handle_vblank(struct vop *vop)
{
	struct drm_device *drm = vop->drm_dev;
	struct drm_crtc *crtc = &vop->crtc;
	unsigned long flags;

	spin_lock_irqsave(&drm->event_lock, flags);
	if (vop->event) {
		drm_crtc_send_vblank_event(crtc, vop->event);
		drm_crtc_vblank_put(crtc);
		vop->event = NULL;
	}
	spin_unlock_irqrestore(&drm->event_lock, flags);

	if (test_and_clear_bit(VOP_PENDING_FB_UNREF, &vop->pending))
		drm_flip_work_commit(&vop->fb_unref_work, system_unbound_wq);
}

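/*
 * vop->event is armed in vop_crtc_atomic_flush() under dev->event_lock and
 * sent from here in the frame start interrupt path, so the vblank event for
 * a page flip is only delivered once the new configuration has taken effect.
 */
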
static irqreturn_t vop_isr(int irq, void *data)
{
	struct vop *vop = data;
	struct drm_crtc *crtc = &vop->crtc;
	uint32_t active_irqs;
	unsigned long flags;
	int ret = IRQ_NONE;

	/*
	 * The interrupt register has interrupt status, enable and clear bits;
	 * we must hold irq_lock to avoid a race with enable/disable_vblank().
	 */
	spin_lock_irqsave(&vop->irq_lock, flags);

	active_irqs = VOP_INTR_GET_TYPE(vop, status, INTR_MASK);
	/* Clear all active interrupt sources */
	if (active_irqs)
		VOP_INTR_SET_TYPE(vop, clear, active_irqs, 1);

	spin_unlock_irqrestore(&vop->irq_lock, flags);

	/* This is expected for vop iommu irqs, since the irq is shared */
	if (!active_irqs)
		return IRQ_NONE;

	if (active_irqs & DSP_HOLD_VALID_INTR) {
		complete(&vop->dsp_hold_completion);
		active_irqs &= ~DSP_HOLD_VALID_INTR;
		ret = IRQ_HANDLED;
	}

	if (active_irqs & LINE_FLAG_INTR) {
		complete(&vop->line_flag_completion);
		active_irqs &= ~LINE_FLAG_INTR;
		ret = IRQ_HANDLED;
	}

	if (active_irqs & FS_INTR) {
		drm_crtc_handle_vblank(crtc);
		vop_handle_vblank(vop);
		active_irqs &= ~FS_INTR;
		ret = IRQ_HANDLED;
	}

	/* Unhandled irqs are spurious. */
	if (active_irqs)
		DRM_DEV_ERROR(vop->dev, "Unknown VOP IRQs: %#02x\n",
			      active_irqs);

	return ret;
}

*vop
)
1229 const struct vop_data
*vop_data
= vop
->data
;
1230 struct device
*dev
= vop
->dev
;
1231 struct drm_device
*drm_dev
= vop
->drm_dev
;
1232 struct drm_plane
*primary
= NULL
, *cursor
= NULL
, *plane
, *tmp
;
1233 struct drm_crtc
*crtc
= &vop
->crtc
;
1234 struct device_node
*port
;
1239 * Create drm_plane for primary and cursor planes first, since we need
1240 * to pass them to drm_crtc_init_with_planes, which sets the
1241 * "possible_crtcs" to the newly initialized crtc.
1243 for (i
= 0; i
< vop_data
->win_size
; i
++) {
1244 struct vop_win
*vop_win
= &vop
->win
[i
];
1245 const struct vop_win_data
*win_data
= vop_win
->data
;
1247 if (win_data
->type
!= DRM_PLANE_TYPE_PRIMARY
&&
1248 win_data
->type
!= DRM_PLANE_TYPE_CURSOR
)
1251 ret
= drm_universal_plane_init(vop
->drm_dev
, &vop_win
->base
,
1252 0, &vop_plane_funcs
,
1253 win_data
->phy
->data_formats
,
1254 win_data
->phy
->nformats
,
1255 NULL
, win_data
->type
, NULL
);
1257 DRM_DEV_ERROR(vop
->dev
, "failed to init plane %d\n",
1259 goto err_cleanup_planes
;
1262 plane
= &vop_win
->base
;
1263 drm_plane_helper_add(plane
, &plane_helper_funcs
);
1264 if (plane
->type
== DRM_PLANE_TYPE_PRIMARY
)
1266 else if (plane
->type
== DRM_PLANE_TYPE_CURSOR
)
1270 ret
= drm_crtc_init_with_planes(drm_dev
, crtc
, primary
, cursor
,
1271 &vop_crtc_funcs
, NULL
);
1273 goto err_cleanup_planes
;
1275 drm_crtc_helper_add(crtc
, &vop_crtc_helper_funcs
);
1278 * Create drm_planes for overlay windows with possible_crtcs restricted
1279 * to the newly created crtc.
1281 for (i
= 0; i
< vop_data
->win_size
; i
++) {
1282 struct vop_win
*vop_win
= &vop
->win
[i
];
1283 const struct vop_win_data
*win_data
= vop_win
->data
;
1284 unsigned long possible_crtcs
= 1 << drm_crtc_index(crtc
);
1286 if (win_data
->type
!= DRM_PLANE_TYPE_OVERLAY
)
1289 ret
= drm_universal_plane_init(vop
->drm_dev
, &vop_win
->base
,
1292 win_data
->phy
->data_formats
,
1293 win_data
->phy
->nformats
,
1294 NULL
, win_data
->type
, NULL
);
1296 DRM_DEV_ERROR(vop
->dev
, "failed to init overlay %d\n",
1298 goto err_cleanup_crtc
;
1300 drm_plane_helper_add(&vop_win
->base
, &plane_helper_funcs
);
1303 port
= of_get_child_by_name(dev
->of_node
, "port");
1305 DRM_DEV_ERROR(vop
->dev
, "no port node found in %pOF\n",
1308 goto err_cleanup_crtc
;
1311 drm_flip_work_init(&vop
->fb_unref_work
, "fb_unref",
1312 vop_fb_unref_worker
);
1314 init_completion(&vop
->dsp_hold_completion
);
1315 init_completion(&vop
->line_flag_completion
);
1321 drm_crtc_cleanup(crtc
);
1323 list_for_each_entry_safe(plane
, tmp
, &drm_dev
->mode_config
.plane_list
,
1325 drm_plane_cleanup(plane
);
static void vop_destroy_crtc(struct vop *vop)
{
	struct drm_crtc *crtc = &vop->crtc;
	struct drm_device *drm_dev = vop->drm_dev;
	struct drm_plane *plane, *tmp;

	of_node_put(crtc->port);

	/*
	 * We need to cleanup the planes now.  Why?
	 *
	 * The planes are "&vop->win[i].base".  That means the memory is
	 * all part of the big "struct vop" chunk of memory.  That memory
	 * was devm allocated and associated with this component.  We need to
	 * free it ourselves before vop_unbind() finishes.
	 */
	list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list,
				 head)
		vop_plane_destroy(plane);

	/*
	 * Destroy CRTC after vop_plane_destroy() since vop_disable_plane()
	 * references the CRTC.
	 */
	drm_crtc_cleanup(crtc);
	drm_flip_work_cleanup(&vop->fb_unref_work);
}

static int vop_initial(struct vop *vop)
{
	const struct vop_data *vop_data = vop->data;
	struct reset_control *ahb_rst;
	int i, ret;

	vop->hclk = devm_clk_get(vop->dev, "hclk_vop");
	if (IS_ERR(vop->hclk)) {
		DRM_DEV_ERROR(vop->dev, "failed to get hclk source\n");
		return PTR_ERR(vop->hclk);
	}
	vop->aclk = devm_clk_get(vop->dev, "aclk_vop");
	if (IS_ERR(vop->aclk)) {
		DRM_DEV_ERROR(vop->dev, "failed to get aclk source\n");
		return PTR_ERR(vop->aclk);
	}
	vop->dclk = devm_clk_get(vop->dev, "dclk_vop");
	if (IS_ERR(vop->dclk)) {
		DRM_DEV_ERROR(vop->dev, "failed to get dclk source\n");
		return PTR_ERR(vop->dclk);
	}

	ret = pm_runtime_get_sync(vop->dev);
	if (ret < 0) {
		DRM_DEV_ERROR(vop->dev, "failed to get pm runtime: %d\n", ret);
		return ret;
	}

	ret = clk_prepare(vop->dclk);
	if (ret < 0) {
		DRM_DEV_ERROR(vop->dev, "failed to prepare dclk\n");
		goto err_put_pm_runtime;
	}

	/* Enable both the hclk and aclk to setup the vop */
	ret = clk_prepare_enable(vop->hclk);
	if (ret < 0) {
		DRM_DEV_ERROR(vop->dev, "failed to prepare/enable hclk\n");
		goto err_unprepare_dclk;
	}

	ret = clk_prepare_enable(vop->aclk);
	if (ret < 0) {
		DRM_DEV_ERROR(vop->dev, "failed to prepare/enable aclk\n");
		goto err_disable_hclk;
	}

	/*
	 * Do the hclk reset, which resets all vop registers.
	 */
	ahb_rst = devm_reset_control_get(vop->dev, "ahb");
	if (IS_ERR(ahb_rst)) {
		DRM_DEV_ERROR(vop->dev, "failed to get ahb reset\n");
		ret = PTR_ERR(ahb_rst);
		goto err_disable_aclk;
	}
	reset_control_assert(ahb_rst);
	usleep_range(10, 20);
	reset_control_deassert(ahb_rst);

	memcpy(vop->regsbak, vop->regs, vop->len);

	VOP_REG_SET(vop, misc, global_regdone_en, 1);
	VOP_REG_SET(vop, common, dsp_blank, 0);

	for (i = 0; i < vop_data->win_size; i++) {
		const struct vop_win_data *win = &vop_data->win[i];
		int channel = i * 2 + 1;

		VOP_WIN_SET(vop, win, channel, (channel + 1) << 4 | channel);
		VOP_WIN_SET(vop, win, enable, 0);
		VOP_WIN_SET(vop, win, gate, 1);
	}

	vop_cfg_done(vop);

	/*
	 * Do the dclk reset so that all of the above configuration takes
	 * effect.
	 */
	vop->dclk_rst = devm_reset_control_get(vop->dev, "dclk");
	if (IS_ERR(vop->dclk_rst)) {
		DRM_DEV_ERROR(vop->dev, "failed to get dclk reset\n");
		ret = PTR_ERR(vop->dclk_rst);
		goto err_disable_aclk;
	}
	reset_control_assert(vop->dclk_rst);
	usleep_range(10, 20);
	reset_control_deassert(vop->dclk_rst);

	clk_disable(vop->hclk);
	clk_disable(vop->aclk);

	vop->is_enabled = false;

	pm_runtime_put_sync(vop->dev);

	return 0;

err_disable_aclk:
	clk_disable_unprepare(vop->aclk);
err_disable_hclk:
	clk_disable_unprepare(vop->hclk);
err_unprepare_dclk:
	clk_unprepare(vop->dclk);
err_put_pm_runtime:
	pm_runtime_put_sync(vop->dev);
	return ret;
}

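/*
 * Summary of vop_initial(): an ahb reset puts all registers in a known
 * state, regsbak is seeded from the hardware, safe defaults are programmed
 * (all windows disabled, each window's channel field assigned), cfg_done
 * plus a dclk reset latch that configuration, and finally hclk/aclk are left
 * prepared but disabled until vop_enable() turns the CRTC on.
 */
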
/*
 * Initialize the vop->win array elements.
 */
static void vop_win_init(struct vop *vop)
{
	const struct vop_data *vop_data = vop->data;
	unsigned int i;

	for (i = 0; i < vop_data->win_size; i++) {
		struct vop_win *vop_win = &vop->win[i];
		const struct vop_win_data *win_data = &vop_data->win[i];

		vop_win->data = win_data;
		vop_win->vop = vop;
	}
}

/**
 * rockchip_drm_wait_vact_end
 * @crtc: CRTC to enable line flag
 * @mstimeout: timeout in milliseconds
 *
 * Wait for the vact_end line flag irq or timeout.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int rockchip_drm_wait_vact_end(struct drm_crtc *crtc, unsigned int mstimeout)
{
	struct vop *vop = to_vop(crtc);
	unsigned long jiffies_left;

	if (!crtc || !vop->is_enabled)
		return -ENODEV;

	if (mstimeout <= 0)
		return -EINVAL;

	if (vop_line_flag_irq_is_enabled(vop))
		return -EBUSY;

	reinit_completion(&vop->line_flag_completion);
	vop_line_flag_irq_enable(vop);

	jiffies_left = wait_for_completion_timeout(&vop->line_flag_completion,
						   msecs_to_jiffies(mstimeout));
	vop_line_flag_irq_disable(vop);

	if (jiffies_left == 0) {
		DRM_DEV_ERROR(vop->dev, "Timeout waiting for IRQ\n");
		return -ETIMEDOUT;
	}

	return 0;
}
EXPORT_SYMBOL(rockchip_drm_wait_vact_end);

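/*
 * Typical use (illustrative): a caller such as the eDP/PSR code can do
 *
 *	ret = rockchip_drm_wait_vact_end(crtc, 100);
 *
 * to block until the active region of the current frame has been scanned
 * out (or 100 ms have passed) before changing state that must not tear.
 */
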
static int vop_bind(struct device *dev, struct device *master, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	const struct vop_data *vop_data;
	struct drm_device *drm_dev = data;
	struct vop *vop;
	struct resource *res;
	size_t alloc_size;
	int ret, irq;

	vop_data = of_device_get_match_data(dev);
	if (!vop_data)
		return -ENODEV;

	/* Allocate vop struct and its vop_win array */
	alloc_size = sizeof(*vop) + sizeof(*vop->win) * vop_data->win_size;
	vop = devm_kzalloc(dev, alloc_size, GFP_KERNEL);
	if (!vop)
		return -ENOMEM;

	vop->dev = dev;
	vop->data = vop_data;
	vop->drm_dev = drm_dev;
	dev_set_drvdata(dev, vop);

	vop_win_init(vop);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	vop->len = resource_size(res);
	vop->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(vop->regs))
		return PTR_ERR(vop->regs);

	vop->regsbak = devm_kzalloc(dev, vop->len, GFP_KERNEL);
	if (!vop->regsbak)
		return -ENOMEM;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		DRM_DEV_ERROR(dev, "cannot find irq for vop\n");
		return irq;
	}
	vop->irq = (unsigned int)irq;

	spin_lock_init(&vop->reg_lock);
	spin_lock_init(&vop->irq_lock);

	mutex_init(&vop->vsync_mutex);

	ret = devm_request_irq(dev, vop->irq, vop_isr,
			       IRQF_SHARED, dev_name(dev), vop);
	if (ret)
		return ret;

	/* IRQ is initially disabled; it gets enabled in power_on */
	disable_irq(vop->irq);

	ret = vop_create_crtc(vop);
	if (ret)
		goto err_enable_irq;

	pm_runtime_enable(&pdev->dev);

	ret = vop_initial(vop);
	if (ret < 0) {
		DRM_DEV_ERROR(&pdev->dev,
			      "cannot initial vop dev - err %d\n", ret);
		goto err_disable_pm_runtime;
	}

	return 0;

err_disable_pm_runtime:
	pm_runtime_disable(&pdev->dev);
	vop_destroy_crtc(vop);
err_enable_irq:
	enable_irq(vop->irq); /* To balance out the disable_irq above */
	return ret;
}

static void vop_unbind(struct device *dev, struct device *master, void *data)
{
	struct vop *vop = dev_get_drvdata(dev);

	pm_runtime_disable(dev);
	vop_destroy_crtc(vop);

	clk_unprepare(vop->aclk);
	clk_unprepare(vop->hclk);
	clk_unprepare(vop->dclk);
}

const struct component_ops vop_component_ops = {
	.bind = vop_bind,
	.unbind = vop_unbind,
};
EXPORT_SYMBOL_GPL(vop_component_ops);