/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>

#include "virtgpu_drv.h"
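
/*
 * Scanout formats advertised to userspace.  The DRM_FORMAT_HOST_*
 * macros (from drm_fourcc.h) pick the XRGB/ARGB or BGRX/BGRA fourcc
 * that matches the CPU byte order.
 */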
static const uint32_t virtio_gpu_formats[] = {
	DRM_FORMAT_HOST_XRGB8888,
};

static const uint32_t virtio_gpu_cursor_formats[] = {
	DRM_FORMAT_HOST_ARGB8888,
};
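
/*
 * Map a DRM fourcc to the corresponding VIRTIO_GPU_FORMAT_* value.
 * DRM fourccs are little-endian packed while the virtio-gpu enums
 * name the bytes in memory order, so XRGB8888 becomes B8G8R8X8.
 */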
uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc)
{
	uint32_t format;

	switch (drm_fourcc) {
	case DRM_FORMAT_XRGB8888:
		format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
		break;
	case DRM_FORMAT_ARGB8888:
		format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
		break;
	case DRM_FORMAT_BGRX8888:
		format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
		break;
	case DRM_FORMAT_BGRA8888:
		format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
		break;
	default:
		/*
		 * This should not happen, we handle everything listed
		 * in virtio_gpu_formats[].
		 */
		format = 0;
		break;
	}
	WARN_ON(format == 0);
	return format;
}
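
/* Free a plane allocated by virtio_gpu_plane_init(). */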
static void virtio_gpu_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
	kfree(plane);
}
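
/*
 * Core plane vtable.  The legacy entry points are routed through the
 * atomic helpers; the driver-specific work lives in the helper funcs
 * further below.
 */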
static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
	.update_plane		= drm_atomic_helper_update_plane,
	.disable_plane		= drm_atomic_helper_disable_plane,
	.destroy		= virtio_gpu_plane_destroy,
	.reset			= drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
};
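
/*
 * Validate a plane update: scaling is not supported, and only the
 * cursor plane may be positioned freely; the primary plane has to
 * cover the whole CRTC.
 */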
static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
					 struct drm_plane_state *state)
{
	bool is_cursor = plane->type == DRM_PLANE_TYPE_CURSOR;
	struct drm_crtc_state *crtc_state;
	int ret;

	if (!state->fb || WARN_ON(!state->crtc))
		return 0;

	crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	ret = drm_atomic_helper_check_plane_state(state, crtc_state,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  is_cursor, true);
	return ret;
}
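
/*
 * Dumb BOs are backed by guest memory, so the damaged rectangle has
 * to be copied into the host-side resource (TRANSFER_TO_HOST_2D)
 * before it can be displayed.
 */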
static void virtio_gpu_update_dumb_bo(struct virtio_gpu_device *vgdev,
				      struct drm_plane_state *state,
				      struct drm_rect *rect)
{
	struct virtio_gpu_object *bo =
		gem_to_virtio_gpu_obj(state->fb->obj[0]);
	struct virtio_gpu_object_array *objs;
	uint32_t w = rect->x2 - rect->x1;
	uint32_t h = rect->y2 - rect->y1;
	uint32_t x = rect->x1;
	uint32_t y = rect->y1;
	uint32_t off = x * state->fb->format->cpp[0] +
		       y * state->fb->pitches[0];

	objs = virtio_gpu_array_alloc(1);
	if (!objs)
		return;
	virtio_gpu_array_add_obj(objs, &bo->base.base);

	virtio_gpu_cmd_transfer_to_host_2d(vgdev, off, w, h, x, y,
					   objs, NULL);
}
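
/*
 * Commit a primary plane update: transfer damage for dumb BOs, point
 * the scanout at the new resource if the fb or source rect changed
 * (or a modeset is pending), then flush the damaged region.
 */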
static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
					    struct drm_plane_state *old_state)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_output *output = NULL;
	struct virtio_gpu_object *bo;
	struct drm_rect rect;

	if (plane->state->crtc)
		output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
	if (old_state->crtc)
		output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
	if (WARN_ON(!output))
		return;

	if (!plane->state->fb || !output->crtc.state->active) {
		DRM_DEBUG("nofb\n");
		virtio_gpu_cmd_set_scanout(vgdev, output->index, 0,
					   plane->state->src_w >> 16,
					   plane->state->src_h >> 16,
					   0, 0);
		virtio_gpu_notify(vgdev);
		return;
	}

	if (!drm_atomic_helper_damage_merged(old_state, plane->state, &rect))
		return;

	bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
	if (bo->dumb)
		virtio_gpu_update_dumb_bo(vgdev, plane->state, &rect);

	if (plane->state->fb != old_state->fb ||
	    plane->state->src_w != old_state->src_w ||
	    plane->state->src_h != old_state->src_h ||
	    plane->state->src_x != old_state->src_x ||
	    plane->state->src_y != old_state->src_y ||
	    output->needs_modeset) {
		output->needs_modeset = false;
		DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n",
			  bo->hw_res_handle,
			  plane->state->crtc_w, plane->state->crtc_h,
			  plane->state->crtc_x, plane->state->crtc_y,
			  plane->state->src_w >> 16,
			  plane->state->src_h >> 16,
			  plane->state->src_x >> 16,
			  plane->state->src_y >> 16);

		if (bo->host3d_blob || bo->guest_blob) {
			virtio_gpu_cmd_set_scanout_blob
						(vgdev, output->index, bo,
						 plane->state->fb,
						 plane->state->src_w >> 16,
						 plane->state->src_h >> 16,
						 plane->state->src_x >> 16,
						 plane->state->src_y >> 16);
		} else {
			virtio_gpu_cmd_set_scanout(vgdev, output->index,
						   bo->hw_res_handle,
						   plane->state->src_w >> 16,
						   plane->state->src_h >> 16,
						   plane->state->src_x >> 16,
						   plane->state->src_y >> 16);
		}
	}

	virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle,
				      rect.x1,
				      rect.y1,
				      rect.x2 - rect.x1,
				      rect.y2 - rect.y1);
	virtio_gpu_notify(vgdev);
}
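
/*
 * prepare_fb is the last commit step that may fail, so the fence
 * guarding the cursor transfer in atomic_update is allocated here.
 */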
static int virtio_gpu_cursor_prepare_fb(struct drm_plane *plane,
					struct drm_plane_state *new_state)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_object *bo;

	if (!new_state->fb)
		return 0;

	vgfb = to_virtio_gpu_framebuffer(new_state->fb);
	bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
	if (bo && bo->dumb && (plane->state->fb != new_state->fb)) {
		vgfb->fence = virtio_gpu_fence_alloc(vgdev);
		if (!vgfb->fence)
			return -ENOMEM;
	}

	return 0;
}
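
/* Drop a fence that prepare_fb allocated but atomic_update did not consume. */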
static void virtio_gpu_cursor_cleanup_fb(struct drm_plane *plane,
					 struct drm_plane_state *old_state)
{
	struct virtio_gpu_framebuffer *vgfb;

	if (!plane->state->fb)
		return;

	vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
	if (vgfb->fence) {
		dma_fence_put(&vgfb->fence->f);
		vgfb->fence = NULL;
	}
}
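
/*
 * Cursor updates do not go through the scanout path: a new image is
 * transferred synchronously (fenced transfer plus dma_fence_wait),
 * then an UPDATE_CURSOR or MOVE_CURSOR command is sent on the cursor
 * queue via virtio_gpu_cursor_ping().
 */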
static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
					   struct drm_plane_state *old_state)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_output *output = NULL;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_object *bo = NULL;
	uint32_t handle;

	if (plane->state->crtc)
		output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
	if (old_state->crtc)
		output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
	if (WARN_ON(!output))
		return;

	if (plane->state->fb) {
		vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
		bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
		handle = bo->hw_res_handle;
	} else {
		handle = 0;
	}

	if (bo && bo->dumb && (plane->state->fb != old_state->fb)) {
		/* new cursor -- update & wait */
		struct virtio_gpu_object_array *objs;

		objs = virtio_gpu_array_alloc(1);
		if (!objs)
			return;
		virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
		virtio_gpu_array_lock_resv(objs);
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, 0,
			 plane->state->crtc_w,
			 plane->state->crtc_h,
			 0, 0, objs, vgfb->fence);
		virtio_gpu_notify(vgdev);
		dma_fence_wait(&vgfb->fence->f, true);
		dma_fence_put(&vgfb->fence->f);
		vgfb->fence = NULL;
	}

	if (plane->state->fb != old_state->fb) {
		DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle,
			  plane->state->crtc_x,
			  plane->state->crtc_y,
			  plane->state->fb ? plane->state->fb->hot_x : 0,
			  plane->state->fb ? plane->state->fb->hot_y : 0);
		output->cursor.hdr.type =
			cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
		output->cursor.resource_id = cpu_to_le32(handle);
		if (plane->state->fb) {
			output->cursor.hot_x =
				cpu_to_le32(plane->state->fb->hot_x);
			output->cursor.hot_y =
				cpu_to_le32(plane->state->fb->hot_y);
		} else {
			output->cursor.hot_x = cpu_to_le32(0);
			output->cursor.hot_y = cpu_to_le32(0);
		}
	} else {
		DRM_DEBUG("move +%d+%d\n",
			  plane->state->crtc_x,
			  plane->state->crtc_y);
		output->cursor.hdr.type =
			cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR);
	}
	output->cursor.pos.x = cpu_to_le32(plane->state->crtc_x);
	output->cursor.pos.y = cpu_to_le32(plane->state->crtc_y);
	virtio_gpu_cursor_ping(vgdev, output);
}

static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
	.atomic_check		= virtio_gpu_plane_atomic_check,
	.atomic_update		= virtio_gpu_primary_plane_update,
};

static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
	.prepare_fb		= virtio_gpu_cursor_prepare_fb,
	.cleanup_fb		= virtio_gpu_cursor_cleanup_fb,
	.atomic_check		= virtio_gpu_plane_atomic_check,
	.atomic_update		= virtio_gpu_cursor_plane_update,
};
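
/*
 * Create the primary or cursor plane for the output at @index and
 * attach the matching format list and helper vtable.
 */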
struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
					enum drm_plane_type type,
					int index)
{
	struct drm_device *dev = vgdev->ddev;
	const struct drm_plane_helper_funcs *funcs;
	struct drm_plane *plane;
	const uint32_t *formats;
	int ret, nformats;

	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
	if (!plane)
		return ERR_PTR(-ENOMEM);

	if (type == DRM_PLANE_TYPE_CURSOR) {
		formats = virtio_gpu_cursor_formats;
		nformats = ARRAY_SIZE(virtio_gpu_cursor_formats);
		funcs = &virtio_gpu_cursor_helper_funcs;
	} else {
		formats = virtio_gpu_formats;
		nformats = ARRAY_SIZE(virtio_gpu_formats);
		funcs = &virtio_gpu_primary_helper_funcs;
	}

	ret = drm_universal_plane_init(dev, plane, 1 << index,
				       &virtio_gpu_plane_funcs,
				       formats, nformats,
				       NULL, type, NULL);
	if (ret)
		goto err_plane_init;

	drm_plane_helper_add(plane, funcs);
	return plane;

err_plane_init:
	kfree(plane);
	return ERR_PTR(ret);
}