/*
 * Copyright (C) 2015 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
26 #include "virtgpu_drv.h"
27 #include <drm/drm_plane_helper.h>
28 #include <drm/drm_atomic_helper.h>
30 static const uint32_t virtio_gpu_formats
[] = {
41 static const uint32_t virtio_gpu_cursor_formats
[] = {
/*
 * Tear down a plane allocated by virtio_gpu_plane_init().
 * NOTE(review): body lost in extraction; restored from upstream —
 * unregisters the plane from DRM, then frees the kzalloc'd memory.
 */
static void virtio_gpu_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
	kfree(plane);
}
50 static const struct drm_plane_funcs virtio_gpu_plane_funcs
= {
51 .update_plane
= drm_atomic_helper_update_plane
,
52 .disable_plane
= drm_atomic_helper_disable_plane
,
53 .destroy
= virtio_gpu_plane_destroy
,
54 .reset
= drm_atomic_helper_plane_reset
,
55 .atomic_duplicate_state
= drm_atomic_helper_plane_duplicate_state
,
56 .atomic_destroy_state
= drm_atomic_helper_plane_destroy_state
,
/*
 * Atomic check hook for both plane types.
 * NOTE(review): body lost in extraction; restored from upstream, which
 * accepts every configuration unconditionally (virtio-gpu has no
 * hardware constraints to validate here).
 */
static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
					 struct drm_plane_state *state)
{
	return 0;
}
65 static void virtio_gpu_primary_plane_update(struct drm_plane
*plane
,
66 struct drm_plane_state
*old_state
)
68 struct drm_device
*dev
= plane
->dev
;
69 struct virtio_gpu_device
*vgdev
= dev
->dev_private
;
70 struct virtio_gpu_output
*output
= NULL
;
71 struct virtio_gpu_framebuffer
*vgfb
;
72 struct virtio_gpu_object
*bo
;
75 if (plane
->state
->crtc
)
76 output
= drm_crtc_to_virtio_gpu_output(plane
->state
->crtc
);
78 output
= drm_crtc_to_virtio_gpu_output(old_state
->crtc
);
82 if (plane
->state
->fb
) {
83 vgfb
= to_virtio_gpu_framebuffer(plane
->state
->fb
);
84 bo
= gem_to_virtio_gpu_obj(vgfb
->obj
);
85 handle
= bo
->hw_res_handle
;
87 virtio_gpu_cmd_transfer_to_host_2d
89 cpu_to_le32(plane
->state
->src_w
>> 16),
90 cpu_to_le32(plane
->state
->src_h
>> 16),
91 plane
->state
->src_x
>> 16,
92 plane
->state
->src_y
>> 16, NULL
);
98 DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n", handle
,
99 plane
->state
->crtc_w
, plane
->state
->crtc_h
,
100 plane
->state
->crtc_x
, plane
->state
->crtc_y
,
101 plane
->state
->src_w
>> 16,
102 plane
->state
->src_h
>> 16,
103 plane
->state
->src_x
>> 16,
104 plane
->state
->src_y
>> 16);
105 virtio_gpu_cmd_set_scanout(vgdev
, output
->index
, handle
,
106 plane
->state
->src_w
>> 16,
107 plane
->state
->src_h
>> 16,
108 plane
->state
->src_x
>> 16,
109 plane
->state
->src_y
>> 16);
110 virtio_gpu_cmd_resource_flush(vgdev
, handle
,
111 plane
->state
->src_x
>> 16,
112 plane
->state
->src_y
>> 16,
113 plane
->state
->src_w
>> 16,
114 plane
->state
->src_h
>> 16);
117 static void virtio_gpu_cursor_plane_update(struct drm_plane
*plane
,
118 struct drm_plane_state
*old_state
)
120 struct drm_device
*dev
= plane
->dev
;
121 struct virtio_gpu_device
*vgdev
= dev
->dev_private
;
122 struct virtio_gpu_output
*output
= NULL
;
123 struct virtio_gpu_framebuffer
*vgfb
;
124 struct virtio_gpu_fence
*fence
= NULL
;
125 struct virtio_gpu_object
*bo
= NULL
;
129 if (plane
->state
->crtc
)
130 output
= drm_crtc_to_virtio_gpu_output(plane
->state
->crtc
);
132 output
= drm_crtc_to_virtio_gpu_output(old_state
->crtc
);
133 if (WARN_ON(!output
))
136 if (plane
->state
->fb
) {
137 vgfb
= to_virtio_gpu_framebuffer(plane
->state
->fb
);
138 bo
= gem_to_virtio_gpu_obj(vgfb
->obj
);
139 handle
= bo
->hw_res_handle
;
144 if (bo
&& bo
->dumb
&& (plane
->state
->fb
!= old_state
->fb
)) {
145 /* new cursor -- update & wait */
146 virtio_gpu_cmd_transfer_to_host_2d
148 cpu_to_le32(plane
->state
->crtc_w
),
149 cpu_to_le32(plane
->state
->crtc_h
),
151 ret
= virtio_gpu_object_reserve(bo
, false);
153 reservation_object_add_excl_fence(bo
->tbo
.resv
,
155 fence_put(&fence
->f
);
157 virtio_gpu_object_unreserve(bo
);
158 virtio_gpu_object_wait(bo
, false);
162 if (plane
->state
->fb
!= old_state
->fb
) {
163 DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle
,
164 plane
->state
->crtc_x
,
165 plane
->state
->crtc_y
,
166 plane
->state
->fb
? plane
->state
->fb
->hot_x
: 0,
167 plane
->state
->fb
? plane
->state
->fb
->hot_y
: 0);
168 output
->cursor
.hdr
.type
=
169 cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR
);
170 output
->cursor
.resource_id
= cpu_to_le32(handle
);
171 if (plane
->state
->fb
) {
172 output
->cursor
.hot_x
=
173 cpu_to_le32(plane
->state
->fb
->hot_x
);
174 output
->cursor
.hot_y
=
175 cpu_to_le32(plane
->state
->fb
->hot_y
);
177 output
->cursor
.hot_x
= cpu_to_le32(0);
178 output
->cursor
.hot_y
= cpu_to_le32(0);
181 DRM_DEBUG("move +%d+%d\n",
182 plane
->state
->crtc_x
,
183 plane
->state
->crtc_y
);
184 output
->cursor
.hdr
.type
=
185 cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR
);
187 output
->cursor
.pos
.x
= cpu_to_le32(plane
->state
->crtc_x
);
188 output
->cursor
.pos
.y
= cpu_to_le32(plane
->state
->crtc_y
);
189 virtio_gpu_cursor_ping(vgdev
, output
);
192 static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs
= {
193 .atomic_check
= virtio_gpu_plane_atomic_check
,
194 .atomic_update
= virtio_gpu_primary_plane_update
,
197 static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs
= {
198 .atomic_check
= virtio_gpu_plane_atomic_check
,
199 .atomic_update
= virtio_gpu_cursor_plane_update
,
202 struct drm_plane
*virtio_gpu_plane_init(struct virtio_gpu_device
*vgdev
,
203 enum drm_plane_type type
,
206 struct drm_device
*dev
= vgdev
->ddev
;
207 const struct drm_plane_helper_funcs
*funcs
;
208 struct drm_plane
*plane
;
209 const uint32_t *formats
;
212 plane
= kzalloc(sizeof(*plane
), GFP_KERNEL
);
214 return ERR_PTR(-ENOMEM
);
216 if (type
== DRM_PLANE_TYPE_CURSOR
) {
217 formats
= virtio_gpu_cursor_formats
;
218 nformats
= ARRAY_SIZE(virtio_gpu_cursor_formats
);
219 funcs
= &virtio_gpu_cursor_helper_funcs
;
221 formats
= virtio_gpu_formats
;
222 nformats
= ARRAY_SIZE(virtio_gpu_formats
);
223 funcs
= &virtio_gpu_primary_helper_funcs
;
225 ret
= drm_universal_plane_init(dev
, plane
, 1 << index
,
226 &virtio_gpu_plane_funcs
,
232 drm_plane_helper_add(plane
, funcs
);