/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
26 #include <drm/drm_atomic_helper.h>
27 #include <drm/drm_damage_helper.h>
28 #include <drm/drm_fourcc.h>
29 #include <drm/drm_plane_helper.h>
31 #include "virtgpu_drv.h"
33 static const uint32_t virtio_gpu_formats
[] = {
34 DRM_FORMAT_HOST_XRGB8888
,
37 static const uint32_t virtio_gpu_cursor_formats
[] = {
38 DRM_FORMAT_HOST_ARGB8888
,
41 uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc
)
46 case DRM_FORMAT_XRGB8888
:
47 format
= VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM
;
49 case DRM_FORMAT_ARGB8888
:
50 format
= VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM
;
52 case DRM_FORMAT_BGRX8888
:
53 format
= VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM
;
55 case DRM_FORMAT_BGRA8888
:
56 format
= VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM
;
60 * This should not happen, we handle everything listed
61 * in virtio_gpu_formats[].
/*
 * Free a plane allocated by virtio_gpu_plane_init().  The kfree() is
 * required because the plane is kzalloc'ed by this driver rather than
 * embedded in another structure; drm_plane_cleanup() alone would leak it.
 */
static void virtio_gpu_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
	kfree(plane);
}
76 static const struct drm_plane_funcs virtio_gpu_plane_funcs
= {
77 .update_plane
= drm_atomic_helper_update_plane
,
78 .disable_plane
= drm_atomic_helper_disable_plane
,
79 .destroy
= virtio_gpu_plane_destroy
,
80 .reset
= drm_atomic_helper_plane_reset
,
81 .atomic_duplicate_state
= drm_atomic_helper_plane_duplicate_state
,
82 .atomic_destroy_state
= drm_atomic_helper_plane_destroy_state
,
85 static int virtio_gpu_plane_atomic_check(struct drm_plane
*plane
,
86 struct drm_plane_state
*state
)
88 bool is_cursor
= plane
->type
== DRM_PLANE_TYPE_CURSOR
;
89 struct drm_crtc_state
*crtc_state
;
92 if (!state
->fb
|| WARN_ON(!state
->crtc
))
95 crtc_state
= drm_atomic_get_crtc_state(state
->state
, state
->crtc
);
96 if (IS_ERR(crtc_state
))
97 return PTR_ERR(crtc_state
);
99 ret
= drm_atomic_helper_check_plane_state(state
, crtc_state
,
100 DRM_PLANE_HELPER_NO_SCALING
,
101 DRM_PLANE_HELPER_NO_SCALING
,
106 static void virtio_gpu_update_dumb_bo(struct virtio_gpu_device
*vgdev
,
107 struct drm_plane_state
*state
,
108 struct drm_rect
*rect
)
110 struct virtio_gpu_object
*bo
=
111 gem_to_virtio_gpu_obj(state
->fb
->obj
[0]);
112 struct virtio_gpu_object_array
*objs
;
113 uint32_t w
= rect
->x2
- rect
->x1
;
114 uint32_t h
= rect
->y2
- rect
->y1
;
115 uint32_t x
= rect
->x1
;
116 uint32_t y
= rect
->y1
;
117 uint32_t off
= x
* state
->fb
->format
->cpp
[0] +
118 y
* state
->fb
->pitches
[0];
120 objs
= virtio_gpu_array_alloc(1);
123 virtio_gpu_array_add_obj(objs
, &bo
->base
.base
);
125 virtio_gpu_cmd_transfer_to_host_2d(vgdev
, off
, w
, h
, x
, y
,
129 static void virtio_gpu_primary_plane_update(struct drm_plane
*plane
,
130 struct drm_plane_state
*old_state
)
132 struct drm_device
*dev
= plane
->dev
;
133 struct virtio_gpu_device
*vgdev
= dev
->dev_private
;
134 struct virtio_gpu_output
*output
= NULL
;
135 struct virtio_gpu_object
*bo
;
136 struct drm_rect rect
;
138 if (plane
->state
->crtc
)
139 output
= drm_crtc_to_virtio_gpu_output(plane
->state
->crtc
);
141 output
= drm_crtc_to_virtio_gpu_output(old_state
->crtc
);
142 if (WARN_ON(!output
))
145 if (!plane
->state
->fb
|| !output
->enabled
) {
147 virtio_gpu_cmd_set_scanout(vgdev
, output
->index
, 0,
148 plane
->state
->src_w
>> 16,
149 plane
->state
->src_h
>> 16,
154 if (!drm_atomic_helper_damage_merged(old_state
, plane
->state
, &rect
))
157 virtio_gpu_disable_notify(vgdev
);
159 bo
= gem_to_virtio_gpu_obj(plane
->state
->fb
->obj
[0]);
161 virtio_gpu_update_dumb_bo(vgdev
, plane
->state
, &rect
);
163 if (plane
->state
->fb
!= old_state
->fb
||
164 plane
->state
->src_w
!= old_state
->src_w
||
165 plane
->state
->src_h
!= old_state
->src_h
||
166 plane
->state
->src_x
!= old_state
->src_x
||
167 plane
->state
->src_y
!= old_state
->src_y
) {
168 DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n",
170 plane
->state
->crtc_w
, plane
->state
->crtc_h
,
171 plane
->state
->crtc_x
, plane
->state
->crtc_y
,
172 plane
->state
->src_w
>> 16,
173 plane
->state
->src_h
>> 16,
174 plane
->state
->src_x
>> 16,
175 plane
->state
->src_y
>> 16);
176 virtio_gpu_cmd_set_scanout(vgdev
, output
->index
,
178 plane
->state
->src_w
>> 16,
179 plane
->state
->src_h
>> 16,
180 plane
->state
->src_x
>> 16,
181 plane
->state
->src_y
>> 16);
184 virtio_gpu_cmd_resource_flush(vgdev
, bo
->hw_res_handle
,
190 virtio_gpu_enable_notify(vgdev
);
193 static int virtio_gpu_cursor_prepare_fb(struct drm_plane
*plane
,
194 struct drm_plane_state
*new_state
)
196 struct drm_device
*dev
= plane
->dev
;
197 struct virtio_gpu_device
*vgdev
= dev
->dev_private
;
198 struct virtio_gpu_framebuffer
*vgfb
;
199 struct virtio_gpu_object
*bo
;
204 vgfb
= to_virtio_gpu_framebuffer(new_state
->fb
);
205 bo
= gem_to_virtio_gpu_obj(vgfb
->base
.obj
[0]);
206 if (bo
&& bo
->dumb
&& (plane
->state
->fb
!= new_state
->fb
)) {
207 vgfb
->fence
= virtio_gpu_fence_alloc(vgdev
);
215 static void virtio_gpu_cursor_cleanup_fb(struct drm_plane
*plane
,
216 struct drm_plane_state
*old_state
)
218 struct virtio_gpu_framebuffer
*vgfb
;
220 if (!plane
->state
->fb
)
223 vgfb
= to_virtio_gpu_framebuffer(plane
->state
->fb
);
225 dma_fence_put(&vgfb
->fence
->f
);
230 static void virtio_gpu_cursor_plane_update(struct drm_plane
*plane
,
231 struct drm_plane_state
*old_state
)
233 struct drm_device
*dev
= plane
->dev
;
234 struct virtio_gpu_device
*vgdev
= dev
->dev_private
;
235 struct virtio_gpu_output
*output
= NULL
;
236 struct virtio_gpu_framebuffer
*vgfb
;
237 struct virtio_gpu_object
*bo
= NULL
;
240 if (plane
->state
->crtc
)
241 output
= drm_crtc_to_virtio_gpu_output(plane
->state
->crtc
);
243 output
= drm_crtc_to_virtio_gpu_output(old_state
->crtc
);
244 if (WARN_ON(!output
))
247 if (plane
->state
->fb
) {
248 vgfb
= to_virtio_gpu_framebuffer(plane
->state
->fb
);
249 bo
= gem_to_virtio_gpu_obj(vgfb
->base
.obj
[0]);
250 handle
= bo
->hw_res_handle
;
255 if (bo
&& bo
->dumb
&& (plane
->state
->fb
!= old_state
->fb
)) {
256 /* new cursor -- update & wait */
257 struct virtio_gpu_object_array
*objs
;
259 objs
= virtio_gpu_array_alloc(1);
262 virtio_gpu_array_add_obj(objs
, vgfb
->base
.obj
[0]);
263 virtio_gpu_array_lock_resv(objs
);
264 virtio_gpu_cmd_transfer_to_host_2d
266 plane
->state
->crtc_w
,
267 plane
->state
->crtc_h
,
268 0, 0, objs
, vgfb
->fence
);
269 dma_fence_wait(&vgfb
->fence
->f
, true);
270 dma_fence_put(&vgfb
->fence
->f
);
274 if (plane
->state
->fb
!= old_state
->fb
) {
275 DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle
,
276 plane
->state
->crtc_x
,
277 plane
->state
->crtc_y
,
278 plane
->state
->fb
? plane
->state
->fb
->hot_x
: 0,
279 plane
->state
->fb
? plane
->state
->fb
->hot_y
: 0);
280 output
->cursor
.hdr
.type
=
281 cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR
);
282 output
->cursor
.resource_id
= cpu_to_le32(handle
);
283 if (plane
->state
->fb
) {
284 output
->cursor
.hot_x
=
285 cpu_to_le32(plane
->state
->fb
->hot_x
);
286 output
->cursor
.hot_y
=
287 cpu_to_le32(plane
->state
->fb
->hot_y
);
289 output
->cursor
.hot_x
= cpu_to_le32(0);
290 output
->cursor
.hot_y
= cpu_to_le32(0);
293 DRM_DEBUG("move +%d+%d\n",
294 plane
->state
->crtc_x
,
295 plane
->state
->crtc_y
);
296 output
->cursor
.hdr
.type
=
297 cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR
);
299 output
->cursor
.pos
.x
= cpu_to_le32(plane
->state
->crtc_x
);
300 output
->cursor
.pos
.y
= cpu_to_le32(plane
->state
->crtc_y
);
301 virtio_gpu_cursor_ping(vgdev
, output
);
304 static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs
= {
305 .atomic_check
= virtio_gpu_plane_atomic_check
,
306 .atomic_update
= virtio_gpu_primary_plane_update
,
309 static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs
= {
310 .prepare_fb
= virtio_gpu_cursor_prepare_fb
,
311 .cleanup_fb
= virtio_gpu_cursor_cleanup_fb
,
312 .atomic_check
= virtio_gpu_plane_atomic_check
,
313 .atomic_update
= virtio_gpu_cursor_plane_update
,
316 struct drm_plane
*virtio_gpu_plane_init(struct virtio_gpu_device
*vgdev
,
317 enum drm_plane_type type
,
320 struct drm_device
*dev
= vgdev
->ddev
;
321 const struct drm_plane_helper_funcs
*funcs
;
322 struct drm_plane
*plane
;
323 const uint32_t *formats
;
326 plane
= kzalloc(sizeof(*plane
), GFP_KERNEL
);
328 return ERR_PTR(-ENOMEM
);
330 if (type
== DRM_PLANE_TYPE_CURSOR
) {
331 formats
= virtio_gpu_cursor_formats
;
332 nformats
= ARRAY_SIZE(virtio_gpu_cursor_formats
);
333 funcs
= &virtio_gpu_cursor_helper_funcs
;
335 formats
= virtio_gpu_formats
;
336 nformats
= ARRAY_SIZE(virtio_gpu_formats
);
337 funcs
= &virtio_gpu_primary_helper_funcs
;
339 ret
= drm_universal_plane_init(dev
, plane
, 1 << index
,
340 &virtio_gpu_plane_funcs
,
346 drm_plane_helper_add(plane
, funcs
);