2 * Copyright (C) 2015 Red Hat, Inc.
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 #include "virtgpu_drv.h"
27 #include <drm/drm_plane_helper.h>
28 #include <drm/drm_atomic_helper.h>
30 static const uint32_t virtio_gpu_formats
[] = {
41 static const uint32_t virtio_gpu_cursor_formats
[] = {
49 uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc
)
55 case DRM_FORMAT_XRGB8888
:
56 format
= VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM
;
58 case DRM_FORMAT_ARGB8888
:
59 format
= VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM
;
61 case DRM_FORMAT_BGRX8888
:
62 format
= VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM
;
64 case DRM_FORMAT_BGRA8888
:
65 format
= VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM
;
67 case DRM_FORMAT_RGBX8888
:
68 format
= VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM
;
70 case DRM_FORMAT_RGBA8888
:
71 format
= VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM
;
73 case DRM_FORMAT_XBGR8888
:
74 format
= VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM
;
76 case DRM_FORMAT_ABGR8888
:
77 format
= VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM
;
80 case DRM_FORMAT_XRGB8888
:
81 format
= VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM
;
83 case DRM_FORMAT_ARGB8888
:
84 format
= VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM
;
86 case DRM_FORMAT_BGRX8888
:
87 format
= VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM
;
89 case DRM_FORMAT_BGRA8888
:
90 format
= VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM
;
92 case DRM_FORMAT_RGBX8888
:
93 format
= VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM
;
95 case DRM_FORMAT_RGBA8888
:
96 format
= VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM
;
98 case DRM_FORMAT_XBGR8888
:
99 format
= VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM
;
101 case DRM_FORMAT_ABGR8888
:
102 format
= VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM
;
107 * This should not happen, we handle everything listed
108 * in virtio_gpu_formats[].
113 WARN_ON(format
== 0);
/*
 * drm_plane_funcs.destroy callback: tear the plane down and release
 * its memory.
 */
static void virtio_gpu_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
	/*
	 * The plane is kzalloc'ed in virtio_gpu_plane_init(); without
	 * this kfree the allocation leaks on driver teardown.
	 */
	kfree(plane);
}
123 static const struct drm_plane_funcs virtio_gpu_plane_funcs
= {
124 .update_plane
= drm_atomic_helper_update_plane
,
125 .disable_plane
= drm_atomic_helper_disable_plane
,
126 .destroy
= virtio_gpu_plane_destroy
,
127 .reset
= drm_atomic_helper_plane_reset
,
128 .atomic_duplicate_state
= drm_atomic_helper_plane_duplicate_state
,
129 .atomic_destroy_state
= drm_atomic_helper_plane_destroy_state
,
/*
 * drm_plane_helper_funcs.atomic_check: no driver-specific constraints
 * to validate, so every proposed plane state is accepted.
 */
static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
					 struct drm_plane_state *state)
{
	return 0;
}
138 static void virtio_gpu_primary_plane_update(struct drm_plane
*plane
,
139 struct drm_plane_state
*old_state
)
141 struct drm_device
*dev
= plane
->dev
;
142 struct virtio_gpu_device
*vgdev
= dev
->dev_private
;
143 struct virtio_gpu_output
*output
= NULL
;
144 struct virtio_gpu_framebuffer
*vgfb
;
145 struct virtio_gpu_object
*bo
;
148 if (plane
->state
->crtc
)
149 output
= drm_crtc_to_virtio_gpu_output(plane
->state
->crtc
);
151 output
= drm_crtc_to_virtio_gpu_output(old_state
->crtc
);
152 if (WARN_ON(!output
))
155 if (plane
->state
->fb
) {
156 vgfb
= to_virtio_gpu_framebuffer(plane
->state
->fb
);
157 bo
= gem_to_virtio_gpu_obj(vgfb
->obj
);
158 handle
= bo
->hw_res_handle
;
160 virtio_gpu_cmd_transfer_to_host_2d
162 cpu_to_le32(plane
->state
->src_w
>> 16),
163 cpu_to_le32(plane
->state
->src_h
>> 16),
164 cpu_to_le32(plane
->state
->src_x
>> 16),
165 cpu_to_le32(plane
->state
->src_y
>> 16), NULL
);
171 DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n", handle
,
172 plane
->state
->crtc_w
, plane
->state
->crtc_h
,
173 plane
->state
->crtc_x
, plane
->state
->crtc_y
,
174 plane
->state
->src_w
>> 16,
175 plane
->state
->src_h
>> 16,
176 plane
->state
->src_x
>> 16,
177 plane
->state
->src_y
>> 16);
178 virtio_gpu_cmd_set_scanout(vgdev
, output
->index
, handle
,
179 plane
->state
->src_w
>> 16,
180 plane
->state
->src_h
>> 16,
181 plane
->state
->src_x
>> 16,
182 plane
->state
->src_y
>> 16);
183 virtio_gpu_cmd_resource_flush(vgdev
, handle
,
184 plane
->state
->src_x
>> 16,
185 plane
->state
->src_y
>> 16,
186 plane
->state
->src_w
>> 16,
187 plane
->state
->src_h
>> 16);
190 static void virtio_gpu_cursor_plane_update(struct drm_plane
*plane
,
191 struct drm_plane_state
*old_state
)
193 struct drm_device
*dev
= plane
->dev
;
194 struct virtio_gpu_device
*vgdev
= dev
->dev_private
;
195 struct virtio_gpu_output
*output
= NULL
;
196 struct virtio_gpu_framebuffer
*vgfb
;
197 struct virtio_gpu_fence
*fence
= NULL
;
198 struct virtio_gpu_object
*bo
= NULL
;
202 if (plane
->state
->crtc
)
203 output
= drm_crtc_to_virtio_gpu_output(plane
->state
->crtc
);
205 output
= drm_crtc_to_virtio_gpu_output(old_state
->crtc
);
206 if (WARN_ON(!output
))
209 if (plane
->state
->fb
) {
210 vgfb
= to_virtio_gpu_framebuffer(plane
->state
->fb
);
211 bo
= gem_to_virtio_gpu_obj(vgfb
->obj
);
212 handle
= bo
->hw_res_handle
;
217 if (bo
&& bo
->dumb
&& (plane
->state
->fb
!= old_state
->fb
)) {
218 /* new cursor -- update & wait */
219 virtio_gpu_cmd_transfer_to_host_2d
221 cpu_to_le32(plane
->state
->crtc_w
),
222 cpu_to_le32(plane
->state
->crtc_h
),
224 ret
= virtio_gpu_object_reserve(bo
, false);
226 reservation_object_add_excl_fence(bo
->tbo
.resv
,
228 dma_fence_put(&fence
->f
);
230 virtio_gpu_object_unreserve(bo
);
231 virtio_gpu_object_wait(bo
, false);
235 if (plane
->state
->fb
!= old_state
->fb
) {
236 DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle
,
237 plane
->state
->crtc_x
,
238 plane
->state
->crtc_y
,
239 plane
->state
->fb
? plane
->state
->fb
->hot_x
: 0,
240 plane
->state
->fb
? plane
->state
->fb
->hot_y
: 0);
241 output
->cursor
.hdr
.type
=
242 cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR
);
243 output
->cursor
.resource_id
= cpu_to_le32(handle
);
244 if (plane
->state
->fb
) {
245 output
->cursor
.hot_x
=
246 cpu_to_le32(plane
->state
->fb
->hot_x
);
247 output
->cursor
.hot_y
=
248 cpu_to_le32(plane
->state
->fb
->hot_y
);
250 output
->cursor
.hot_x
= cpu_to_le32(0);
251 output
->cursor
.hot_y
= cpu_to_le32(0);
254 DRM_DEBUG("move +%d+%d\n",
255 plane
->state
->crtc_x
,
256 plane
->state
->crtc_y
);
257 output
->cursor
.hdr
.type
=
258 cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR
);
260 output
->cursor
.pos
.x
= cpu_to_le32(plane
->state
->crtc_x
);
261 output
->cursor
.pos
.y
= cpu_to_le32(plane
->state
->crtc_y
);
262 virtio_gpu_cursor_ping(vgdev
, output
);
265 static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs
= {
266 .atomic_check
= virtio_gpu_plane_atomic_check
,
267 .atomic_update
= virtio_gpu_primary_plane_update
,
270 static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs
= {
271 .atomic_check
= virtio_gpu_plane_atomic_check
,
272 .atomic_update
= virtio_gpu_cursor_plane_update
,
275 struct drm_plane
*virtio_gpu_plane_init(struct virtio_gpu_device
*vgdev
,
276 enum drm_plane_type type
,
279 struct drm_device
*dev
= vgdev
->ddev
;
280 const struct drm_plane_helper_funcs
*funcs
;
281 struct drm_plane
*plane
;
282 const uint32_t *formats
;
285 plane
= kzalloc(sizeof(*plane
), GFP_KERNEL
);
287 return ERR_PTR(-ENOMEM
);
289 if (type
== DRM_PLANE_TYPE_CURSOR
) {
290 formats
= virtio_gpu_cursor_formats
;
291 nformats
= ARRAY_SIZE(virtio_gpu_cursor_formats
);
292 funcs
= &virtio_gpu_cursor_helper_funcs
;
294 formats
= virtio_gpu_formats
;
295 nformats
= ARRAY_SIZE(virtio_gpu_formats
);
296 funcs
= &virtio_gpu_primary_helper_funcs
;
298 ret
= drm_universal_plane_init(dev
, plane
, 1 << index
,
299 &virtio_gpu_plane_funcs
,
305 drm_plane_helper_add(plane
, funcs
);