/*
 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
 * Author:Mark Yao <mark.yao@rock-chips.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <drm/drm.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/component.h>

#include <linux/reset.h>
#include <linux/delay.h>

#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"
#include "rockchip_drm_fb.h"
#include "rockchip_drm_vop.h"

#define __REG_SET_RELAXED(x, off, mask, shift, v) \
		vop_mask_write_relaxed(x, off, (mask) << shift, (v) << shift)
#define __REG_SET_NORMAL(x, off, mask, shift, v) \
		vop_mask_write(x, off, (mask) << shift, (v) << shift)

#define REG_SET(x, base, reg, v, mode) \
		__REG_SET_##mode(x, base + reg.offset, reg.mask, reg.shift, v)
#define REG_SET_MASK(x, base, reg, mask, v, mode) \
		__REG_SET_##mode(x, base + reg.offset, mask, reg.shift, v)

#define VOP_WIN_SET(x, win, name, v) \
		REG_SET(x, win->base, win->phy->name, v, RELAXED)
#define VOP_SCL_SET(x, win, name, v) \
		REG_SET(x, win->base, win->phy->scl->name, v, RELAXED)
#define VOP_SCL_SET_EXT(x, win, name, v) \
		REG_SET(x, win->base, win->phy->scl->ext->name, v, RELAXED)
#define VOP_CTRL_SET(x, name, v) \
		REG_SET(x, 0, (x)->data->ctrl->name, v, NORMAL)

#define VOP_INTR_GET(vop, name) \
		vop_read_reg(vop, 0, &vop->data->ctrl->name)

#define VOP_INTR_SET(vop, name, mask, v) \
		REG_SET_MASK(vop, 0, vop->data->intr->name, mask, v, NORMAL)
#define VOP_INTR_SET_TYPE(vop, name, type, v) \
	do { \
		int i, reg = 0, mask = 0; \
		for (i = 0; i < vop->data->intr->nintrs; i++) { \
			if (vop->data->intr->intrs[i] & type) { \
				reg |= (v) << i; \
				mask |= 1 << i; \
			} \
		} \
		VOP_INTR_SET(vop, name, mask, reg); \
	} while (0)
#define VOP_INTR_GET_TYPE(vop, name, type) \
		vop_get_intr_type(vop, &vop->data->intr->name, type)

#define VOP_WIN_GET(x, win, name) \
		vop_read_reg(x, win->base, &win->phy->name)

#define VOP_WIN_GET_YRGBADDR(vop, win) \
		vop_readl(vop, win->base + win->phy->yrgb_mst.offset)

#define to_vop(x) container_of(x, struct vop, crtc)
#define to_vop_win(x) container_of(x, struct vop_win, base)
#define to_vop_plane_state(x) container_of(x, struct vop_plane_state, base)
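
/*
 * Illustrative macro expansion (not generated code): a call such as
 *
 *	VOP_WIN_SET(vop, win, enable, 1);
 *
 * goes through REG_SET(..., RELAXED) above and ends up as
 *
 *	vop_mask_write_relaxed(vop, win->base + win->phy->enable.offset,
 *			       win->phy->enable.mask << win->phy->enable.shift,
 *			       1 << win->phy->enable.shift);
 *
 * i.e. every field update is a masked read-modify-write confined to the
 * bits described by the per-SoC vop_reg tables supplied through vop->data.
 */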

struct vop_plane_state {
	struct drm_plane_state base;
	int format;
	struct drm_rect src;
	struct drm_rect dest;
	dma_addr_t yrgb_mst;
	bool enable;
};

struct vop_win {
	struct drm_plane base;
	const struct vop_win_data *data;
	struct vop *vop;

	struct vop_plane_state state;
};

struct vop {
	struct drm_crtc crtc;
	struct device *dev;
	struct drm_device *drm_dev;
	bool is_enabled;

	/* protects the vsync work */
	struct mutex vsync_mutex;
	bool vsync_work_pending;
	struct completion dsp_hold_completion;
	struct completion wait_update_complete;
	struct drm_pending_vblank_event *event;

	const struct vop_data *data;

	uint32_t *regsbak;
	void __iomem *regs;

	/* physical map length of the vop register region */
	uint32_t len;

	/* only one process at a time may configure the registers */
	spinlock_t reg_lock;
	/* protects the vop interrupt registers */
	spinlock_t irq_lock;

	unsigned int irq;

	/* vop AHB clock */
	struct clk *hclk;
	/* vop dot (pixel) clock */
	struct clk *dclk;
	/* vop share-memory (bus) clock */
	struct clk *aclk;

	/* vop dclk reset */
	struct reset_control *dclk_rst;

	struct vop_win win[];
};

static inline void vop_writel(struct vop *vop, uint32_t offset, uint32_t v)
{
	writel(v, vop->regs + offset);
	vop->regsbak[offset >> 2] = v;
}

static inline uint32_t vop_readl(struct vop *vop, uint32_t offset)
{
	return readl(vop->regs + offset);
}

static inline uint32_t vop_read_reg(struct vop *vop, uint32_t base,
				    const struct vop_reg *reg)
{
	return (vop_readl(vop, base + reg->offset) >> reg->shift) & reg->mask;
}

static inline void vop_mask_write(struct vop *vop, uint32_t offset,
				  uint32_t mask, uint32_t v)
{
	if (mask) {
		uint32_t cached_val = vop->regsbak[offset >> 2];

		cached_val = (cached_val & ~mask) | v;
		writel(cached_val, vop->regs + offset);
		vop->regsbak[offset >> 2] = cached_val;
	}
}

static inline void vop_mask_write_relaxed(struct vop *vop, uint32_t offset,
					  uint32_t mask, uint32_t v)
{
	if (mask) {
		uint32_t cached_val = vop->regsbak[offset >> 2];

		cached_val = (cached_val & ~mask) | v;
		writel_relaxed(cached_val, vop->regs + offset);
		vop->regsbak[offset >> 2] = cached_val;
	}
}
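
/*
 * Note on the register cache: the masked-write helpers above never read the
 * hardware back; they do their read-modify-write against the shadow copy in
 * vop->regsbak.  The shadow is seeded from the hardware in vop_initial()
 * (memcpy from vop->regs) and written back wholesale in vop_enable(), so
 * masked writes always start from a known register image.
 *
 * Illustrative only: a field with mask 0x1 and shift 12 at offset 0x10 is
 * set to 1 via
 *	cached = vop->regsbak[0x10 >> 2];
 *	cached = (cached & ~(0x1 << 12)) | (1 << 12);
 *	writel(cached, vop->regs + 0x10);
 */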

static inline uint32_t vop_get_intr_type(struct vop *vop,
					 const struct vop_reg *reg, int type)
{
	uint32_t i, ret = 0;
	uint32_t regs = vop_read_reg(vop, 0, reg);

	for (i = 0; i < vop->data->intr->nintrs; i++) {
		if ((type & vop->data->intr->intrs[i]) && (regs & 1 << i))
			ret |= vop->data->intr->intrs[i];
	}

	return ret;
}

static inline void vop_cfg_done(struct vop *vop)
{
	VOP_CTRL_SET(vop, cfg_done, 1);
}

static bool has_rb_swapped(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_BGR888:
	case DRM_FORMAT_BGR565:
		return true;
	default:
		return false;
	}
}

static enum vop_data_format vop_convert_format(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		return VOP_FMT_ARGB8888;
	case DRM_FORMAT_RGB888:
	case DRM_FORMAT_BGR888:
		return VOP_FMT_RGB888;
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_BGR565:
		return VOP_FMT_RGB565;
	case DRM_FORMAT_NV12:
		return VOP_FMT_YUV420SP;
	case DRM_FORMAT_NV16:
		return VOP_FMT_YUV422SP;
	case DRM_FORMAT_NV24:
		return VOP_FMT_YUV444SP;
	default:
		DRM_ERROR("unsupported format[%08x]\n", format);
		return -EINVAL;
	}
}

static bool is_yuv_support(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV16:
	case DRM_FORMAT_NV24:
		return true;
	default:
		return false;
	}
}

static bool is_alpha_support(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_ABGR8888:
		return true;
	default:
		return false;
	}
}

static uint16_t scl_vop_cal_scale(enum scale_mode mode, uint32_t src,
				  uint32_t dst, bool is_horizontal,
				  int vsu_mode, int *vskiplines)
{
	uint16_t val = 1 << SCL_FT_DEFAULT_FIXPOINT_SHIFT;

	if (is_horizontal) {
		if (mode == SCALE_UP)
			val = GET_SCL_FT_BIC(src, dst);
		else if (mode == SCALE_DOWN)
			val = GET_SCL_FT_BILI_DN(src, dst);
	} else {
		if (mode == SCALE_UP) {
			if (vsu_mode == SCALE_UP_BIL)
				val = GET_SCL_FT_BILI_UP(src, dst);
			else
				val = GET_SCL_FT_BIC(src, dst);
		} else if (mode == SCALE_DOWN) {
			if (vskiplines) {
				*vskiplines = scl_get_vskiplines(src, dst);
				val = scl_get_bili_dn_vskip(src, dst,
							    *vskiplines);
			} else {
				val = GET_SCL_FT_BILI_DN(src, dst);
			}
		}
	}

	return val;
}
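
/*
 * Usage sketch (illustrative): for a window scaling a 1920x1080 source down
 * to 1280x720, scl_vop_cal_scl_fac() below ends up calling something like
 *
 *	val = scl_vop_cal_scale(SCALE_DOWN, 1080, 720, false,
 *				vsu_mode, &vskiplines);
 *
 * for the vertical direction.  The returned value is a fixed-point scale
 * factor (SCL_FT_DEFAULT_FIXPOINT_SHIFT and the GET_SCL_FT_* formulas live
 * in rockchip_drm_vop.h) that gets programmed into the scale_yrgb_y field,
 * and vskiplines is set when the ratio is large enough that the hardware
 * should only fetch every 2nd or 4th source line.
 */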

static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
				uint32_t src_w, uint32_t src_h, uint32_t dst_w,
				uint32_t dst_h, uint32_t pixel_format)
{
	uint16_t yrgb_hor_scl_mode, yrgb_ver_scl_mode;
	uint16_t cbcr_hor_scl_mode = SCALE_NONE;
	uint16_t cbcr_ver_scl_mode = SCALE_NONE;
	int hsub = drm_format_horz_chroma_subsampling(pixel_format);
	int vsub = drm_format_vert_chroma_subsampling(pixel_format);
	bool is_yuv = is_yuv_support(pixel_format);
	uint16_t cbcr_src_w = src_w / hsub;
	uint16_t cbcr_src_h = src_h / vsub;
	uint16_t vsu_mode;
	uint16_t lb_mode;
	uint32_t val;
	int vskiplines = 0;

	if (dst_w > 3840) {
		DRM_ERROR("Maximum destination width (3840) exceeded\n");
		return;
	}

	if (!win->phy->scl->ext) {
		VOP_SCL_SET(vop, win, scale_yrgb_x,
			    scl_cal_scale2(src_w, dst_w));
		VOP_SCL_SET(vop, win, scale_yrgb_y,
			    scl_cal_scale2(src_h, dst_h));
		if (is_yuv) {
			VOP_SCL_SET(vop, win, scale_cbcr_x,
				    scl_cal_scale2(src_w, dst_w));
			VOP_SCL_SET(vop, win, scale_cbcr_y,
				    scl_cal_scale2(src_h, dst_h));
		}
		return;
	}

	yrgb_hor_scl_mode = scl_get_scl_mode(src_w, dst_w);
	yrgb_ver_scl_mode = scl_get_scl_mode(src_h, dst_h);

	if (is_yuv) {
		cbcr_hor_scl_mode = scl_get_scl_mode(cbcr_src_w, dst_w);
		cbcr_ver_scl_mode = scl_get_scl_mode(cbcr_src_h, dst_h);
		if (cbcr_hor_scl_mode == SCALE_DOWN)
			lb_mode = scl_vop_cal_lb_mode(dst_w, true);
		else
			lb_mode = scl_vop_cal_lb_mode(cbcr_src_w, true);
	} else {
		if (yrgb_hor_scl_mode == SCALE_DOWN)
			lb_mode = scl_vop_cal_lb_mode(dst_w, false);
		else
			lb_mode = scl_vop_cal_lb_mode(src_w, false);
	}

	VOP_SCL_SET_EXT(vop, win, lb_mode, lb_mode);
	if (lb_mode == LB_RGB_3840X2) {
		if (yrgb_ver_scl_mode != SCALE_NONE) {
			DRM_ERROR("yrgb vertical scaling is not allowed\n");
			return;
		}
		if (cbcr_ver_scl_mode != SCALE_NONE) {
			DRM_ERROR("cbcr vertical scaling is not allowed\n");
			return;
		}
		vsu_mode = SCALE_UP_BIL;
	} else if (lb_mode == LB_RGB_2560X4) {
		vsu_mode = SCALE_UP_BIL;
	} else {
		vsu_mode = SCALE_UP_BIC;
	}

	val = scl_vop_cal_scale(yrgb_hor_scl_mode, src_w, dst_w,
				true, 0, NULL);
	VOP_SCL_SET(vop, win, scale_yrgb_x, val);
	val = scl_vop_cal_scale(yrgb_ver_scl_mode, src_h, dst_h,
				false, vsu_mode, &vskiplines);
	VOP_SCL_SET(vop, win, scale_yrgb_y, val);

	VOP_SCL_SET_EXT(vop, win, vsd_yrgb_gt4, vskiplines == 4);
	VOP_SCL_SET_EXT(vop, win, vsd_yrgb_gt2, vskiplines == 2);

	VOP_SCL_SET_EXT(vop, win, yrgb_hor_scl_mode, yrgb_hor_scl_mode);
	VOP_SCL_SET_EXT(vop, win, yrgb_ver_scl_mode, yrgb_ver_scl_mode);
	VOP_SCL_SET_EXT(vop, win, yrgb_hsd_mode, SCALE_DOWN_BIL);
	VOP_SCL_SET_EXT(vop, win, yrgb_vsd_mode, SCALE_DOWN_BIL);
	VOP_SCL_SET_EXT(vop, win, yrgb_vsu_mode, vsu_mode);
	if (is_yuv) {
		val = scl_vop_cal_scale(cbcr_hor_scl_mode, cbcr_src_w,
					dst_w, true, 0, NULL);
		VOP_SCL_SET(vop, win, scale_cbcr_x, val);
		val = scl_vop_cal_scale(cbcr_ver_scl_mode, cbcr_src_h,
					dst_h, false, vsu_mode, &vskiplines);
		VOP_SCL_SET(vop, win, scale_cbcr_y, val);

		VOP_SCL_SET_EXT(vop, win, vsd_cbcr_gt4, vskiplines == 4);
		VOP_SCL_SET_EXT(vop, win, vsd_cbcr_gt2, vskiplines == 2);
		VOP_SCL_SET_EXT(vop, win, cbcr_hor_scl_mode, cbcr_hor_scl_mode);
		VOP_SCL_SET_EXT(vop, win, cbcr_ver_scl_mode, cbcr_ver_scl_mode);
		VOP_SCL_SET_EXT(vop, win, cbcr_hsd_mode, SCALE_DOWN_BIL);
		VOP_SCL_SET_EXT(vop, win, cbcr_vsd_mode, SCALE_DOWN_BIL);
		VOP_SCL_SET_EXT(vop, win, cbcr_vsu_mode, vsu_mode);
	}
}

static void vop_dsp_hold_valid_irq_enable(struct vop *vop)
{
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, enable, DSP_HOLD_VALID_INTR, 1);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}

static void vop_dsp_hold_valid_irq_disable(struct vop *vop)
{
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, enable, DSP_HOLD_VALID_INTR, 0);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}

static void vop_enable(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	int ret;

	if (vop->is_enabled)
		return;

	ret = pm_runtime_get_sync(vop->dev);
	if (ret < 0) {
		dev_err(vop->dev, "failed to get pm runtime: %d\n", ret);
		return;
	}

	ret = clk_enable(vop->hclk);
	if (ret < 0) {
		dev_err(vop->dev, "failed to enable hclk - %d\n", ret);
		return;
	}

	ret = clk_enable(vop->dclk);
	if (ret < 0) {
		dev_err(vop->dev, "failed to enable dclk - %d\n", ret);
		goto err_disable_hclk;
	}

	ret = clk_enable(vop->aclk);
	if (ret < 0) {
		dev_err(vop->dev, "failed to enable aclk - %d\n", ret);
		goto err_disable_dclk;
	}

	/*
	 * Slave iommu shares power, irq and clock with vop.  It was associated
	 * automatically with this master device via common driver code.
	 * Now that we have enabled the clock we attach it to the shared drm
	 * mapping.
	 */
	ret = rockchip_drm_dma_attach_device(vop->drm_dev, vop->dev);
	if (ret) {
		dev_err(vop->dev, "failed to attach dma mapping, %d\n", ret);
		goto err_disable_aclk;
	}

	memcpy(vop->regs, vop->regsbak, vop->len);
	/*
	 * At this point the vop clock and iommu are enabled, so reading and
	 * writing the vop registers is safe.
	 */
	vop->is_enabled = true;

	spin_lock(&vop->reg_lock);

	VOP_CTRL_SET(vop, standby, 0);

	spin_unlock(&vop->reg_lock);

	enable_irq(vop->irq);

	drm_crtc_vblank_on(crtc);

	return;

err_disable_aclk:
	clk_disable(vop->aclk);
err_disable_dclk:
	clk_disable(vop->dclk);
err_disable_hclk:
	clk_disable(vop->hclk);
}

static void vop_crtc_disable(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	int i;

	if (!vop->is_enabled)
		return;

	/*
	 * We need to make sure that all windows are disabled before we
	 * disable that crtc.  Otherwise we might try to scan from a
	 * destroyed buffer later.
	 */
	for (i = 0; i < vop->data->win_size; i++) {
		struct vop_win *vop_win = &vop->win[i];
		const struct vop_win_data *win = vop_win->data;

		spin_lock(&vop->reg_lock);
		VOP_WIN_SET(vop, win, enable, 0);
		spin_unlock(&vop->reg_lock);
	}

	drm_crtc_vblank_off(crtc);

	/*
	 * VOP standby only takes effect at the end of the current frame;
	 * the "dsp hold valid" interrupt signals that standby is complete.
	 *
	 * We must wait for standby to complete before disabling aclk,
	 * otherwise the memory bus may hang.
	 */
	reinit_completion(&vop->dsp_hold_completion);
	vop_dsp_hold_valid_irq_enable(vop);

	spin_lock(&vop->reg_lock);

	VOP_CTRL_SET(vop, standby, 1);

	spin_unlock(&vop->reg_lock);

	wait_for_completion(&vop->dsp_hold_completion);

	vop_dsp_hold_valid_irq_disable(vop);

	disable_irq(vop->irq);

	vop->is_enabled = false;

	/*
	 * VOP standby is complete, so detaching the iommu is safe.
	 */
	rockchip_drm_dma_detach_device(vop->drm_dev, vop->dev);

	clk_disable(vop->dclk);
	clk_disable(vop->aclk);
	clk_disable(vop->hclk);
	pm_runtime_put(vop->dev);
}

static void vop_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
}

static int vop_plane_atomic_check(struct drm_plane *plane,
				  struct drm_plane_state *state)
{
	struct drm_crtc *crtc = state->crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_framebuffer *fb = state->fb;
	struct vop_win *vop_win = to_vop_win(plane);
	struct vop_plane_state *vop_plane_state = to_vop_plane_state(state);
	const struct vop_win_data *win = vop_win->data;
	bool visible;
	int ret;
	struct drm_rect *dest = &vop_plane_state->dest;
	struct drm_rect *src = &vop_plane_state->src;
	struct drm_rect clip;
	int min_scale = win->phy->scl ? FRAC_16_16(1, 8) :
					DRM_PLANE_HELPER_NO_SCALING;
	int max_scale = win->phy->scl ? FRAC_16_16(8, 1) :
					DRM_PLANE_HELPER_NO_SCALING;

	if (!crtc || !fb)
		goto out_disable;

	crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
	if (WARN_ON(!crtc_state))
		return -EINVAL;

	src->x1 = state->src_x;
	src->y1 = state->src_y;
	src->x2 = state->src_x + state->src_w;
	src->y2 = state->src_y + state->src_h;
	dest->x1 = state->crtc_x;
	dest->y1 = state->crtc_y;
	dest->x2 = state->crtc_x + state->crtc_w;
	dest->y2 = state->crtc_y + state->crtc_h;

	clip.x1 = 0;
	clip.y1 = 0;
	clip.x2 = crtc_state->adjusted_mode.hdisplay;
	clip.y2 = crtc_state->adjusted_mode.vdisplay;

	ret = drm_plane_helper_check_update(plane, crtc, state->fb,
					    src, dest, &clip,
					    min_scale, max_scale,
					    true, true, &visible);
	if (ret)
		return ret;

	if (!visible)
		goto out_disable;

	vop_plane_state->format = vop_convert_format(fb->pixel_format);
	if (vop_plane_state->format < 0)
		return vop_plane_state->format;

	/*
	 * src->x1 can become odd after clipping, but the start point of a
	 * YUV plane must be aligned to 2 pixels.
	 */
	if (is_yuv_support(fb->pixel_format) && ((src->x1 >> 16) % 2))
		return -EINVAL;

	vop_plane_state->enable = true;

	return 0;

out_disable:
	vop_plane_state->enable = false;
	return 0;
}

static void vop_plane_atomic_disable(struct drm_plane *plane,
				     struct drm_plane_state *old_state)
{
	struct vop_plane_state *vop_plane_state = to_vop_plane_state(old_state);
	struct vop_win *vop_win = to_vop_win(plane);
	const struct vop_win_data *win = vop_win->data;
	struct vop *vop = to_vop(old_state->crtc);

	if (!old_state->crtc)
		return;

	spin_lock(&vop->reg_lock);

	VOP_WIN_SET(vop, win, enable, 0);

	spin_unlock(&vop->reg_lock);

	vop_plane_state->enable = false;
}

static void vop_plane_atomic_update(struct drm_plane *plane,
				    struct drm_plane_state *old_state)
{
	struct drm_plane_state *state = plane->state;
	struct drm_crtc *crtc = state->crtc;
	struct vop_win *vop_win = to_vop_win(plane);
	struct vop_plane_state *vop_plane_state = to_vop_plane_state(state);
	const struct vop_win_data *win = vop_win->data;
	struct vop *vop = to_vop(state->crtc);
	struct drm_framebuffer *fb = state->fb;
	unsigned int actual_w, actual_h;
	unsigned int dsp_stx, dsp_sty;
	uint32_t act_info, dsp_info, dsp_st;
	struct drm_rect *src = &vop_plane_state->src;
	struct drm_rect *dest = &vop_plane_state->dest;
	struct drm_gem_object *obj, *uv_obj;
	struct rockchip_gem_object *rk_obj, *rk_uv_obj;
	unsigned long offset;
	dma_addr_t dma_addr;
	uint32_t val;
	bool rb_swap;

	/*
	 * The plane cannot be updated while the vop is disabled.
	 */
	if (WARN_ON(!crtc))
		return;

	if (WARN_ON(!vop->is_enabled))
		return;

	if (!vop_plane_state->enable) {
		vop_plane_atomic_disable(plane, old_state);
		return;
	}

	obj = rockchip_fb_get_gem_obj(fb, 0);
	rk_obj = to_rockchip_obj(obj);

	actual_w = drm_rect_width(src) >> 16;
	actual_h = drm_rect_height(src) >> 16;
	act_info = (actual_h - 1) << 16 | ((actual_w - 1) & 0xffff);

	dsp_info = (drm_rect_height(dest) - 1) << 16;
	dsp_info |= (drm_rect_width(dest) - 1) & 0xffff;

	dsp_stx = dest->x1 + crtc->mode.htotal - crtc->mode.hsync_start;
	dsp_sty = dest->y1 + crtc->mode.vtotal - crtc->mode.vsync_start;
	dsp_st = dsp_sty << 16 | (dsp_stx & 0xffff);
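
	/*
	 * src is in 16.16 fixed point (the >> 16 above recovers whole
	 * pixels).  The framebuffer offset of the first visible pixel is
	 * built from the integer parts of src->x1/y1 together with the
	 * plane's bytes-per-pixel and pitch; yrgb_mst below is the DMA
	 * address the window scans out from.
	 */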
	offset = (src->x1 >> 16) * drm_format_plane_cpp(fb->pixel_format, 0);
	offset += (src->y1 >> 16) * fb->pitches[0];
	vop_plane_state->yrgb_mst = rk_obj->dma_addr + offset + fb->offsets[0];

	spin_lock(&vop->reg_lock);

	VOP_WIN_SET(vop, win, format, vop_plane_state->format);
	VOP_WIN_SET(vop, win, yrgb_vir, fb->pitches[0] >> 2);
	VOP_WIN_SET(vop, win, yrgb_mst, vop_plane_state->yrgb_mst);
	if (is_yuv_support(fb->pixel_format)) {
		int hsub = drm_format_horz_chroma_subsampling(fb->pixel_format);
		int vsub = drm_format_vert_chroma_subsampling(fb->pixel_format);
		int bpp = drm_format_plane_cpp(fb->pixel_format, 1);

		uv_obj = rockchip_fb_get_gem_obj(fb, 1);
		rk_uv_obj = to_rockchip_obj(uv_obj);

		offset = (src->x1 >> 16) * bpp / hsub;
		offset += (src->y1 >> 16) * fb->pitches[1] / vsub;

		dma_addr = rk_uv_obj->dma_addr + offset + fb->offsets[1];
		VOP_WIN_SET(vop, win, uv_vir, fb->pitches[1] >> 2);
		VOP_WIN_SET(vop, win, uv_mst, dma_addr);
	}

	if (win->phy->scl)
		scl_vop_cal_scl_fac(vop, win, actual_w, actual_h,
				    drm_rect_width(dest), drm_rect_height(dest),
				    fb->pixel_format);

	VOP_WIN_SET(vop, win, act_info, act_info);
	VOP_WIN_SET(vop, win, dsp_info, dsp_info);
	VOP_WIN_SET(vop, win, dsp_st, dsp_st);

	rb_swap = has_rb_swapped(fb->pixel_format);
	VOP_WIN_SET(vop, win, rb_swap, rb_swap);

	if (is_alpha_support(fb->pixel_format)) {
		VOP_WIN_SET(vop, win, dst_alpha_ctl,
			    DST_FACTOR_M0(ALPHA_SRC_INVERSE));
		val = SRC_ALPHA_EN(1) | SRC_COLOR_M0(ALPHA_SRC_PRE_MUL) |
			SRC_ALPHA_M0(ALPHA_STRAIGHT) |
			SRC_BLEND_M0(ALPHA_PER_PIX) |
			SRC_ALPHA_CAL_M0(ALPHA_NO_SATURATION) |
			SRC_FACTOR_M0(ALPHA_ONE);
		VOP_WIN_SET(vop, win, src_alpha_ctl, val);
	} else {
		VOP_WIN_SET(vop, win, src_alpha_ctl, SRC_ALPHA_EN(0));
	}

	VOP_WIN_SET(vop, win, enable, 1);
	spin_unlock(&vop->reg_lock);
}

static const struct drm_plane_helper_funcs plane_helper_funcs = {
	.atomic_check = vop_plane_atomic_check,
	.atomic_update = vop_plane_atomic_update,
	.atomic_disable = vop_plane_atomic_disable,
};

void vop_atomic_plane_reset(struct drm_plane *plane)
{
	struct vop_plane_state *vop_plane_state =
					to_vop_plane_state(plane->state);

	if (plane->state && plane->state->fb)
		drm_framebuffer_unreference(plane->state->fb);

	kfree(vop_plane_state);
	vop_plane_state = kzalloc(sizeof(*vop_plane_state), GFP_KERNEL);
	if (!vop_plane_state)
		return;

	plane->state = &vop_plane_state->base;
	plane->state->plane = plane;
}

struct drm_plane_state *
vop_atomic_plane_duplicate_state(struct drm_plane *plane)
{
	struct vop_plane_state *old_vop_plane_state;
	struct vop_plane_state *vop_plane_state;

	if (WARN_ON(!plane->state))
		return NULL;

	old_vop_plane_state = to_vop_plane_state(plane->state);
	vop_plane_state = kmemdup(old_vop_plane_state,
				  sizeof(*vop_plane_state), GFP_KERNEL);
	if (!vop_plane_state)
		return NULL;

	__drm_atomic_helper_plane_duplicate_state(plane,
						  &vop_plane_state->base);

	return &vop_plane_state->base;
}

static void vop_atomic_plane_destroy_state(struct drm_plane *plane,
					   struct drm_plane_state *state)
{
	struct vop_plane_state *vop_state = to_vop_plane_state(state);

	__drm_atomic_helper_plane_destroy_state(plane, state);

	kfree(vop_state);
}

static const struct drm_plane_funcs vop_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = vop_plane_destroy,
	.reset = vop_atomic_plane_reset,
	.atomic_duplicate_state = vop_atomic_plane_duplicate_state,
	.atomic_destroy_state = vop_atomic_plane_destroy_state,
};
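
/*
 * The reset/duplicate/destroy hooks above exist because this driver
 * subclasses drm_plane_state as vop_plane_state (the converted format, the
 * clipped src/dest rectangles and the latched yrgb_mst address ride along
 * with the core state), so the full structure has to be allocated, copied
 * and freed here instead of using the generic plane-state helpers.
 */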

int rockchip_drm_crtc_mode_config(struct drm_crtc *crtc,
				  int connector_type,
				  int out_mode)
{
	struct vop *vop = to_vop(crtc);

	if (WARN_ON(!vop->is_enabled))
		return -EINVAL;

	switch (connector_type) {
	case DRM_MODE_CONNECTOR_LVDS:
		VOP_CTRL_SET(vop, rgb_en, 1);
		break;
	case DRM_MODE_CONNECTOR_eDP:
		VOP_CTRL_SET(vop, edp_en, 1);
		break;
	case DRM_MODE_CONNECTOR_HDMIA:
		VOP_CTRL_SET(vop, hdmi_en, 1);
		break;
	case DRM_MODE_CONNECTOR_DSI:
		VOP_CTRL_SET(vop, mipi_en, 1);
		break;
	default:
		DRM_ERROR("unsupported connector_type[%d]\n", connector_type);
		return -EINVAL;
	}

	VOP_CTRL_SET(vop, out_mode, out_mode);

	return 0;
}
EXPORT_SYMBOL_GPL(rockchip_drm_crtc_mode_config);

static int vop_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return -EPERM;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, enable, FS_INTR, 1);

	spin_unlock_irqrestore(&vop->irq_lock, flags);

	return 0;
}

static void vop_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, enable, FS_INTR, 0);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}
*crtc
)
887 struct vop
*vop
= to_vop(crtc
);
889 reinit_completion(&vop
->wait_update_complete
);
890 WARN_ON(!wait_for_completion_timeout(&vop
->wait_update_complete
, 100));
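
/*
 * The completion above is signalled from vop_handle_vblank() once every
 * window's latched yrgb address matches its pending state (see
 * vop_win_pending_is_complete()).  The timeout argument of
 * wait_for_completion_timeout() is in jiffies, so this waits at most
 * 100 ticks for the frame-start interrupt to confirm that the new
 * configuration has been latched by the hardware.
 */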

static void vop_crtc_cancel_pending_vblank(struct drm_crtc *crtc,
					   struct drm_file *file_priv)
{
	struct drm_device *drm = crtc->dev;
	struct vop *vop = to_vop(crtc);
	struct drm_pending_vblank_event *e;
	unsigned long flags;

	spin_lock_irqsave(&drm->event_lock, flags);
	e = vop->event;
	if (e && e->base.file_priv == file_priv) {
		vop->event = NULL;

		e->base.destroy(&e->base);
		file_priv->event_space += sizeof(e->event);
	}
	spin_unlock_irqrestore(&drm->event_lock, flags);
}

static const struct rockchip_crtc_funcs private_crtc_funcs = {
	.enable_vblank = vop_crtc_enable_vblank,
	.disable_vblank = vop_crtc_disable_vblank,
	.wait_for_update = vop_crtc_wait_for_update,
	.cancel_pending_vblank = vop_crtc_cancel_pending_vblank,
};

static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
				const struct drm_display_mode *mode,
				struct drm_display_mode *adjusted_mode)
{
	struct vop *vop = to_vop(crtc);

	adjusted_mode->clock =
		clk_round_rate(vop->dclk, mode->clock * 1000) / 1000;

	return true;
}

static void vop_crtc_enable(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
	u16 hsync_len = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
	u16 hdisplay = adjusted_mode->hdisplay;
	u16 htotal = adjusted_mode->htotal;
	u16 hact_st = adjusted_mode->htotal - adjusted_mode->hsync_start;
	u16 hact_end = hact_st + hdisplay;
	u16 vdisplay = adjusted_mode->vdisplay;
	u16 vtotal = adjusted_mode->vtotal;
	u16 vsync_len = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
	u16 vact_st = adjusted_mode->vtotal - adjusted_mode->vsync_start;
	u16 vact_end = vact_st + vdisplay;
	uint32_t val;

	vop_enable(crtc);
	/*
	 * If the dclk rate is zero, scanout is stopped and there is nothing
	 * to wait for.
	 */
	if (clk_get_rate(vop->dclk)) {
		/*
		 * The RK3288 VOP timing registers take effect immediately;
		 * reprogramming the display timing while a frame is being
		 * scanned out can cause tearing.
		 *
		 * VOP standby, however, only takes effect at the end of the
		 * current frame, and the "dsp hold valid" interrupt signals
		 * that standby is complete.  So, for a mode set:
		 *
		 *    standby and wait complete --> |----
		 *                                       | display time
		 *                                       |----
		 *                                       |---> dsp hold irq
		 *     configure display timing --> |
		 *         standby exit             |
		 *                                  | new frame start.
		 */
		reinit_completion(&vop->dsp_hold_completion);
		vop_dsp_hold_valid_irq_enable(vop);

		spin_lock(&vop->reg_lock);

		VOP_CTRL_SET(vop, standby, 1);

		spin_unlock(&vop->reg_lock);

		wait_for_completion(&vop->dsp_hold_completion);

		vop_dsp_hold_valid_irq_disable(vop);
	}

	val = 0x8;
	val |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ? 0 : 1;
	val |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ? 0 : (1 << 1);
	VOP_CTRL_SET(vop, pin_pol, val);

	VOP_CTRL_SET(vop, htotal_pw, (htotal << 16) | hsync_len);
	val = hact_st << 16;
	val |= hact_end;
	VOP_CTRL_SET(vop, hact_st_end, val);
	VOP_CTRL_SET(vop, hpost_st_end, val);

	VOP_CTRL_SET(vop, vtotal_pw, (vtotal << 16) | vsync_len);
	val = vact_st << 16;
	val |= vact_end;
	VOP_CTRL_SET(vop, vact_st_end, val);
	VOP_CTRL_SET(vop, vpost_st_end, val);

	clk_set_rate(vop->dclk, adjusted_mode->clock * 1000);

	VOP_CTRL_SET(vop, standby, 0);
}

static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
				  struct drm_crtc_state *old_crtc_state)
{
	struct vop *vop = to_vop(crtc);

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock(&vop->reg_lock);

	vop_cfg_done(vop);

	spin_unlock(&vop->reg_lock);
}

static void vop_crtc_atomic_begin(struct drm_crtc *crtc,
				  struct drm_crtc_state *old_crtc_state)
{
	struct vop *vop = to_vop(crtc);

	if (crtc->state->event) {
		WARN_ON(drm_crtc_vblank_get(crtc) != 0);

		vop->event = crtc->state->event;
		crtc->state->event = NULL;
	}
}

static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = {
	.enable = vop_crtc_enable,
	.disable = vop_crtc_disable,
	.mode_fixup = vop_crtc_mode_fixup,
	.atomic_flush = vop_crtc_atomic_flush,
	.atomic_begin = vop_crtc_atomic_begin,
};

static void vop_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
}

static const struct drm_crtc_funcs vop_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.destroy = vop_crtc_destroy,
	.reset = drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};

static bool vop_win_pending_is_complete(struct vop_win *vop_win)
{
	struct drm_plane *plane = &vop_win->base;
	struct vop_plane_state *state = to_vop_plane_state(plane->state);
	dma_addr_t yrgb_mst;

	if (!state->enable)
		return VOP_WIN_GET(vop_win->vop, vop_win->data, enable) == 0;

	yrgb_mst = VOP_WIN_GET_YRGBADDR(vop_win->vop, vop_win->data);

	return yrgb_mst == state->yrgb_mst;
}

static void vop_handle_vblank(struct vop *vop)
{
	struct drm_device *drm = vop->drm_dev;
	struct drm_crtc *crtc = &vop->crtc;
	unsigned long flags;
	int i;

	for (i = 0; i < vop->data->win_size; i++) {
		if (!vop_win_pending_is_complete(&vop->win[i]))
			return;
	}

	spin_lock_irqsave(&drm->event_lock, flags);
	if (vop->event) {
		drm_crtc_send_vblank_event(crtc, vop->event);
		drm_crtc_vblank_put(crtc);
		vop->event = NULL;
	}
	spin_unlock_irqrestore(&drm->event_lock, flags);

	if (!completion_done(&vop->wait_update_complete))
		complete(&vop->wait_update_complete);
}

static irqreturn_t vop_isr(int irq, void *data)
{
	struct vop *vop = data;
	struct drm_crtc *crtc = &vop->crtc;
	uint32_t active_irqs;
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;

	/*
	 * The interrupt register has interrupt status, enable and clear bits;
	 * we must hold irq_lock to avoid a race with enable/disable_vblank().
	 */
	spin_lock_irqsave(&vop->irq_lock, flags);

	active_irqs = VOP_INTR_GET_TYPE(vop, status, INTR_MASK);
	/* Clear all active interrupt sources */
	if (active_irqs)
		VOP_INTR_SET_TYPE(vop, clear, active_irqs, 1);

	spin_unlock_irqrestore(&vop->irq_lock, flags);

	/* This is expected for vop iommu irqs, since the irq is shared */
	if (!active_irqs)
		return IRQ_NONE;

	if (active_irqs & DSP_HOLD_VALID_INTR) {
		complete(&vop->dsp_hold_completion);
		active_irqs &= ~DSP_HOLD_VALID_INTR;
		ret = IRQ_HANDLED;
	}

	if (active_irqs & FS_INTR) {
		drm_crtc_handle_vblank(crtc);
		vop_handle_vblank(vop);
		active_irqs &= ~FS_INTR;
		ret = IRQ_HANDLED;
	}

	/* Unhandled irqs are spurious. */
	if (active_irqs)
		DRM_ERROR("Unknown VOP IRQs: %#02x\n", active_irqs);

	return ret;
}

static int vop_create_crtc(struct vop *vop)
{
	const struct vop_data *vop_data = vop->data;
	struct device *dev = vop->dev;
	struct drm_device *drm_dev = vop->drm_dev;
	struct drm_plane *primary = NULL, *cursor = NULL, *plane, *tmp;
	struct drm_crtc *crtc = &vop->crtc;
	struct device_node *port;
	int ret;
	int i;

	/*
	 * Create drm_plane for primary and cursor planes first, since we need
	 * to pass them to drm_crtc_init_with_planes, which sets the
	 * "possible_crtcs" to the newly initialized crtc.
	 */
	for (i = 0; i < vop_data->win_size; i++) {
		struct vop_win *vop_win = &vop->win[i];
		const struct vop_win_data *win_data = vop_win->data;

		if (win_data->type != DRM_PLANE_TYPE_PRIMARY &&
		    win_data->type != DRM_PLANE_TYPE_CURSOR)
			continue;

		ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base,
					       0, &vop_plane_funcs,
					       win_data->phy->data_formats,
					       win_data->phy->nformats,
					       win_data->type, NULL);
		if (ret) {
			DRM_ERROR("failed to initialize plane\n");
			goto err_cleanup_planes;
		}

		plane = &vop_win->base;
		drm_plane_helper_add(plane, &plane_helper_funcs);
		if (plane->type == DRM_PLANE_TYPE_PRIMARY)
			primary = plane;
		else if (plane->type == DRM_PLANE_TYPE_CURSOR)
			cursor = plane;
	}

	ret = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
					&vop_crtc_funcs, NULL);
	if (ret)
		goto err_cleanup_planes;

	drm_crtc_helper_add(crtc, &vop_crtc_helper_funcs);

	/*
	 * Create drm_planes for overlay windows with possible_crtcs restricted
	 * to the newly created crtc.
	 */
	for (i = 0; i < vop_data->win_size; i++) {
		struct vop_win *vop_win = &vop->win[i];
		const struct vop_win_data *win_data = vop_win->data;
		unsigned long possible_crtcs = 1 << drm_crtc_index(crtc);

		if (win_data->type != DRM_PLANE_TYPE_OVERLAY)
			continue;

		ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base,
					       possible_crtcs,
					       &vop_plane_funcs,
					       win_data->phy->data_formats,
					       win_data->phy->nformats,
					       win_data->type, NULL);
		if (ret) {
			DRM_ERROR("failed to initialize overlay plane\n");
			goto err_cleanup_crtc;
		}
		drm_plane_helper_add(&vop_win->base, &plane_helper_funcs);
	}

	port = of_get_child_by_name(dev->of_node, "port");
	if (!port) {
		DRM_ERROR("no port node found in %s\n",
			  dev->of_node->full_name);
		ret = -ENOENT;
		goto err_cleanup_crtc;
	}

	init_completion(&vop->dsp_hold_completion);
	init_completion(&vop->wait_update_complete);
	crtc->port = port;
	rockchip_register_crtc_funcs(crtc, &private_crtc_funcs);

	return 0;

err_cleanup_crtc:
	drm_crtc_cleanup(crtc);
err_cleanup_planes:
	list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list,
				 head)
		drm_plane_cleanup(plane);
	return ret;
}

static void vop_destroy_crtc(struct vop *vop)
{
	struct drm_crtc *crtc = &vop->crtc;
	struct drm_device *drm_dev = vop->drm_dev;
	struct drm_plane *plane, *tmp;

	rockchip_unregister_crtc_funcs(crtc);
	of_node_put(crtc->port);

	/*
	 * We need to cleanup the planes now.  Why?
	 *
	 * The planes are "&vop->win[i].base".  That means the memory is
	 * all part of the big "struct vop" chunk of memory.  That memory
	 * was devm allocated and associated with this component.  We need to
	 * free it ourselves before vop_unbind() finishes.
	 */
	list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list,
				 head)
		vop_plane_destroy(plane);

	/*
	 * Destroy the CRTC after vop_plane_destroy() since vop_disable_plane()
	 * references the CRTC.
	 */
	drm_crtc_cleanup(crtc);
}

static int vop_initial(struct vop *vop)
{
	const struct vop_data *vop_data = vop->data;
	const struct vop_reg_data *init_table = vop_data->init_table;
	struct reset_control *ahb_rst;
	int i, ret;

	vop->hclk = devm_clk_get(vop->dev, "hclk_vop");
	if (IS_ERR(vop->hclk)) {
		dev_err(vop->dev, "failed to get hclk source\n");
		return PTR_ERR(vop->hclk);
	}
	vop->aclk = devm_clk_get(vop->dev, "aclk_vop");
	if (IS_ERR(vop->aclk)) {
		dev_err(vop->dev, "failed to get aclk source\n");
		return PTR_ERR(vop->aclk);
	}
	vop->dclk = devm_clk_get(vop->dev, "dclk_vop");
	if (IS_ERR(vop->dclk)) {
		dev_err(vop->dev, "failed to get dclk source\n");
		return PTR_ERR(vop->dclk);
	}

	ret = clk_prepare(vop->dclk);
	if (ret < 0) {
		dev_err(vop->dev, "failed to prepare dclk\n");
		return ret;
	}

	/* Enable both the hclk and aclk to setup the vop */
	ret = clk_prepare_enable(vop->hclk);
	if (ret < 0) {
		dev_err(vop->dev, "failed to prepare/enable hclk\n");
		goto err_unprepare_dclk;
	}

	ret = clk_prepare_enable(vop->aclk);
	if (ret < 0) {
		dev_err(vop->dev, "failed to prepare/enable aclk\n");
		goto err_disable_hclk;
	}

	/*
	 * Do the AHB (hclk) reset to bring all vop registers back to their
	 * default values.
	 */
	ahb_rst = devm_reset_control_get(vop->dev, "ahb");
	if (IS_ERR(ahb_rst)) {
		dev_err(vop->dev, "failed to get ahb reset\n");
		ret = PTR_ERR(ahb_rst);
		goto err_disable_aclk;
	}
	reset_control_assert(ahb_rst);
	usleep_range(10, 20);
	reset_control_deassert(ahb_rst);

	memcpy(vop->regsbak, vop->regs, vop->len);

	for (i = 0; i < vop_data->table_size; i++)
		vop_writel(vop, init_table[i].offset, init_table[i].value);

	for (i = 0; i < vop_data->win_size; i++) {
		const struct vop_win_data *win = &vop_data->win[i];

		VOP_WIN_SET(vop, win, enable, 0);
	}

	vop_cfg_done(vop);

	/*
	 * Do the dclk reset so that the configuration above takes effect.
	 */
	vop->dclk_rst = devm_reset_control_get(vop->dev, "dclk");
	if (IS_ERR(vop->dclk_rst)) {
		dev_err(vop->dev, "failed to get dclk reset\n");
		ret = PTR_ERR(vop->dclk_rst);
		goto err_disable_aclk;
	}
	reset_control_assert(vop->dclk_rst);
	usleep_range(10, 20);
	reset_control_deassert(vop->dclk_rst);

	clk_disable(vop->hclk);
	clk_disable(vop->aclk);

	vop->is_enabled = false;

	return 0;

err_disable_aclk:
	clk_disable_unprepare(vop->aclk);
err_disable_hclk:
	clk_disable_unprepare(vop->hclk);
err_unprepare_dclk:
	clk_unprepare(vop->dclk);
	return ret;
}

/*
 * Initialize the vop->win array elements.
 */
static void vop_win_init(struct vop *vop)
{
	const struct vop_data *vop_data = vop->data;
	unsigned int i;

	for (i = 0; i < vop_data->win_size; i++) {
		struct vop_win *vop_win = &vop->win[i];
		const struct vop_win_data *win_data = &vop_data->win[i];

		vop_win->data = win_data;
		vop_win->vop = vop;
	}
}

static int vop_bind(struct device *dev, struct device *master, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	const struct vop_data *vop_data;
	struct drm_device *drm_dev = data;
	struct vop *vop;
	struct resource *res;
	size_t alloc_size;
	int ret, irq;

	vop_data = of_device_get_match_data(dev);
	if (!vop_data)
		return -ENODEV;

	/* Allocate vop struct and its vop_win array */
	alloc_size = sizeof(*vop) + sizeof(*vop->win) * vop_data->win_size;
	vop = devm_kzalloc(dev, alloc_size, GFP_KERNEL);
	if (!vop)
		return -ENOMEM;

	vop->dev = dev;
	vop->data = vop_data;
	vop->drm_dev = drm_dev;
	dev_set_drvdata(dev, vop);

	vop_win_init(vop);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	vop->len = resource_size(res);
	vop->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(vop->regs))
		return PTR_ERR(vop->regs);

	vop->regsbak = devm_kzalloc(dev, vop->len, GFP_KERNEL);
	if (!vop->regsbak)
		return -ENOMEM;

	ret = vop_initial(vop);
	if (ret < 0) {
		dev_err(&pdev->dev, "cannot initial vop dev - err %d\n", ret);
		return ret;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "cannot find irq for vop\n");
		return irq;
	}
	vop->irq = (unsigned int)irq;

	spin_lock_init(&vop->reg_lock);
	spin_lock_init(&vop->irq_lock);

	mutex_init(&vop->vsync_mutex);

	ret = devm_request_irq(dev, vop->irq, vop_isr,
			       IRQF_SHARED, dev_name(dev), vop);
	if (ret)
		return ret;

	/* IRQ is initially disabled; it gets enabled in power_on */
	disable_irq(vop->irq);

	ret = vop_create_crtc(vop);
	if (ret)
		return ret;

	pm_runtime_enable(&pdev->dev);
	return 0;
}

static void vop_unbind(struct device *dev, struct device *master, void *data)
{
	struct vop *vop = dev_get_drvdata(dev);

	pm_runtime_disable(dev);
	vop_destroy_crtc(vop);
}

const struct component_ops vop_component_ops = {
	.bind = vop_bind,
	.unbind = vop_unbind,
};
EXPORT_SYMBOL_GPL(vop_component_ops);