/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
27 #include <drm/drm_fb_helper.h>
28 #include "virtgpu_drv.h"
30 #define VIRTIO_GPU_FBCON_POLL_PERIOD (HZ / 60)
32 struct virtio_gpu_fbdev
{
33 struct drm_fb_helper helper
;
34 struct virtio_gpu_framebuffer vgfb
;
35 struct virtio_gpu_device
*vgdev
;
36 struct delayed_work work
;
39 static int virtio_gpu_dirty_update(struct virtio_gpu_framebuffer
*fb
,
40 bool store
, int x
, int y
,
41 int width
, int height
)
43 struct drm_device
*dev
= fb
->base
.dev
;
44 struct virtio_gpu_device
*vgdev
= dev
->dev_private
;
45 bool store_for_later
= false;
46 int bpp
= fb
->base
.bits_per_pixel
/ 8;
49 struct virtio_gpu_object
*obj
= gem_to_virtio_gpu_obj(fb
->obj
);
52 (x
+ width
> fb
->base
.width
) ||
53 (y
+ height
> fb
->base
.height
)) {
54 DRM_DEBUG("values out of range %dx%d+%d+%d, fb %dx%d\n",
56 fb
->base
.width
, fb
->base
.height
);
61 * Can be called with pretty much any context (console output
62 * path). If we are in atomic just store the dirty rect info
63 * to send out the update later.
65 * Can't test inside spin lock.
67 if (in_atomic() || store
)
68 store_for_later
= true;
73 spin_lock_irqsave(&fb
->dirty_lock
, flags
);
84 if (store_for_later
) {
89 spin_unlock_irqrestore(&fb
->dirty_lock
, flags
);
93 fb
->x1
= fb
->y1
= INT_MAX
;
96 spin_unlock_irqrestore(&fb
->dirty_lock
, flags
);
100 uint32_t w
= x2
- x
+ 1;
101 uint32_t h
= y2
- y
+ 1;
103 offset
= (y
* fb
->base
.pitches
[0]) + x
* bpp
;
105 virtio_gpu_cmd_transfer_to_host_2d(vgdev
, obj
->hw_res_handle
,
114 virtio_gpu_cmd_resource_flush(vgdev
, obj
->hw_res_handle
,
115 x
, y
, x2
- x
+ 1, y2
- y
+ 1);
119 int virtio_gpu_surface_dirty(struct virtio_gpu_framebuffer
*vgfb
,
120 struct drm_clip_rect
*clips
,
123 struct virtio_gpu_device
*vgdev
= vgfb
->base
.dev
->dev_private
;
124 struct virtio_gpu_object
*obj
= gem_to_virtio_gpu_obj(vgfb
->obj
);
125 struct drm_clip_rect norect
;
126 struct drm_clip_rect
*clips_ptr
;
127 int left
, right
, top
, bottom
;
133 norect
.x1
= norect
.y1
= 0;
134 norect
.x2
= vgfb
->base
.width
;
135 norect
.y2
= vgfb
->base
.height
;
142 /* skip the first clip rect */
143 for (i
= 1, clips_ptr
= clips
+ inc
;
144 i
< num_clips
; i
++, clips_ptr
+= inc
) {
145 left
= min_t(int, left
, (int)clips_ptr
->x1
);
146 right
= max_t(int, right
, (int)clips_ptr
->x2
);
147 top
= min_t(int, top
, (int)clips_ptr
->y1
);
148 bottom
= max_t(int, bottom
, (int)clips_ptr
->y2
);
152 return virtio_gpu_dirty_update(vgfb
, false, left
, top
,
153 right
- left
, bottom
- top
);
155 virtio_gpu_cmd_resource_flush(vgdev
, obj
->hw_res_handle
,
156 left
, top
, right
- left
, bottom
- top
);
160 static void virtio_gpu_fb_dirty_work(struct work_struct
*work
)
162 struct delayed_work
*delayed_work
= to_delayed_work(work
);
163 struct virtio_gpu_fbdev
*vfbdev
=
164 container_of(delayed_work
, struct virtio_gpu_fbdev
, work
);
165 struct virtio_gpu_framebuffer
*vgfb
= &vfbdev
->vgfb
;
167 virtio_gpu_dirty_update(&vfbdev
->vgfb
, false, vgfb
->x1
, vgfb
->y1
,
168 vgfb
->x2
- vgfb
->x1
, vgfb
->y2
- vgfb
->y1
);
171 static void virtio_gpu_3d_fillrect(struct fb_info
*info
,
172 const struct fb_fillrect
*rect
)
174 struct virtio_gpu_fbdev
*vfbdev
= info
->par
;
175 drm_fb_helper_sys_fillrect(info
, rect
);
176 virtio_gpu_dirty_update(&vfbdev
->vgfb
, true, rect
->dx
, rect
->dy
,
177 rect
->width
, rect
->height
);
178 schedule_delayed_work(&vfbdev
->work
, VIRTIO_GPU_FBCON_POLL_PERIOD
);
181 static void virtio_gpu_3d_copyarea(struct fb_info
*info
,
182 const struct fb_copyarea
*area
)
184 struct virtio_gpu_fbdev
*vfbdev
= info
->par
;
185 drm_fb_helper_sys_copyarea(info
, area
);
186 virtio_gpu_dirty_update(&vfbdev
->vgfb
, true, area
->dx
, area
->dy
,
187 area
->width
, area
->height
);
188 schedule_delayed_work(&vfbdev
->work
, VIRTIO_GPU_FBCON_POLL_PERIOD
);
191 static void virtio_gpu_3d_imageblit(struct fb_info
*info
,
192 const struct fb_image
*image
)
194 struct virtio_gpu_fbdev
*vfbdev
= info
->par
;
195 drm_fb_helper_sys_imageblit(info
, image
);
196 virtio_gpu_dirty_update(&vfbdev
->vgfb
, true, image
->dx
, image
->dy
,
197 image
->width
, image
->height
);
198 schedule_delayed_work(&vfbdev
->work
, VIRTIO_GPU_FBCON_POLL_PERIOD
);
201 static struct fb_ops virtio_gpufb_ops
= {
202 .owner
= THIS_MODULE
,
203 .fb_check_var
= drm_fb_helper_check_var
,
204 .fb_set_par
= drm_fb_helper_set_par
, /* TODO: copy vmwgfx */
205 .fb_fillrect
= virtio_gpu_3d_fillrect
,
206 .fb_copyarea
= virtio_gpu_3d_copyarea
,
207 .fb_imageblit
= virtio_gpu_3d_imageblit
,
208 .fb_pan_display
= drm_fb_helper_pan_display
,
209 .fb_blank
= drm_fb_helper_blank
,
210 .fb_setcmap
= drm_fb_helper_setcmap
,
211 .fb_debug_enter
= drm_fb_helper_debug_enter
,
212 .fb_debug_leave
= drm_fb_helper_debug_leave
,
215 static int virtio_gpu_vmap_fb(struct virtio_gpu_device
*vgdev
,
216 struct virtio_gpu_object
*obj
)
218 return virtio_gpu_object_kmap(obj
, NULL
);
221 static int virtio_gpufb_create(struct drm_fb_helper
*helper
,
222 struct drm_fb_helper_surface_size
*sizes
)
224 struct virtio_gpu_fbdev
*vfbdev
=
225 container_of(helper
, struct virtio_gpu_fbdev
, helper
);
226 struct drm_device
*dev
= helper
->dev
;
227 struct virtio_gpu_device
*vgdev
= dev
->dev_private
;
228 struct fb_info
*info
;
229 struct drm_framebuffer
*fb
;
230 struct drm_mode_fb_cmd2 mode_cmd
= {};
231 struct virtio_gpu_object
*obj
;
232 uint32_t resid
, format
, size
;
235 mode_cmd
.width
= sizes
->surface_width
;
236 mode_cmd
.height
= sizes
->surface_height
;
237 mode_cmd
.pitches
[0] = mode_cmd
.width
* 4;
238 mode_cmd
.pixel_format
= drm_mode_legacy_fb_format(32, 24);
240 switch (mode_cmd
.pixel_format
) {
242 case DRM_FORMAT_XRGB8888
:
243 format
= VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM
;
245 case DRM_FORMAT_ARGB8888
:
246 format
= VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM
;
248 case DRM_FORMAT_BGRX8888
:
249 format
= VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM
;
251 case DRM_FORMAT_BGRA8888
:
252 format
= VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM
;
254 case DRM_FORMAT_RGBX8888
:
255 format
= VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM
;
257 case DRM_FORMAT_RGBA8888
:
258 format
= VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM
;
260 case DRM_FORMAT_XBGR8888
:
261 format
= VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM
;
263 case DRM_FORMAT_ABGR8888
:
264 format
= VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM
;
267 case DRM_FORMAT_XRGB8888
:
268 format
= VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM
;
270 case DRM_FORMAT_ARGB8888
:
271 format
= VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM
;
273 case DRM_FORMAT_BGRX8888
:
274 format
= VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM
;
276 case DRM_FORMAT_BGRA8888
:
277 format
= VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM
;
279 case DRM_FORMAT_RGBX8888
:
280 format
= VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM
;
282 case DRM_FORMAT_RGBA8888
:
283 format
= VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM
;
285 case DRM_FORMAT_XBGR8888
:
286 format
= VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM
;
288 case DRM_FORMAT_ABGR8888
:
289 format
= VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM
;
293 DRM_ERROR("failed to find virtio gpu format for %d\n",
294 mode_cmd
.pixel_format
);
298 size
= mode_cmd
.pitches
[0] * mode_cmd
.height
;
299 obj
= virtio_gpu_alloc_object(dev
, size
, false, true);
303 virtio_gpu_resource_id_get(vgdev
, &resid
);
304 virtio_gpu_cmd_create_resource(vgdev
, resid
, format
,
305 mode_cmd
.width
, mode_cmd
.height
);
307 ret
= virtio_gpu_vmap_fb(vgdev
, obj
);
309 DRM_ERROR("failed to vmap fb %d\n", ret
);
313 /* attach the object to the resource */
314 ret
= virtio_gpu_object_attach(vgdev
, obj
, resid
, NULL
);
318 info
= drm_fb_helper_alloc_fbi(helper
);
326 ret
= virtio_gpu_framebuffer_init(dev
, &vfbdev
->vgfb
,
327 &mode_cmd
, &obj
->gem_base
);
331 fb
= &vfbdev
->vgfb
.base
;
333 vfbdev
->helper
.fb
= fb
;
335 strcpy(info
->fix
.id
, "virtiodrmfb");
336 info
->flags
= FBINFO_DEFAULT
;
337 info
->fbops
= &virtio_gpufb_ops
;
338 info
->pixmap
.flags
= FB_PIXMAP_SYSTEM
;
340 info
->screen_buffer
= obj
->vmap
;
341 info
->screen_size
= obj
->gem_base
.size
;
342 drm_fb_helper_fill_fix(info
, fb
->pitches
[0], fb
->depth
);
343 drm_fb_helper_fill_var(info
, &vfbdev
->helper
,
344 sizes
->fb_width
, sizes
->fb_height
);
346 info
->fix
.mmio_start
= 0;
347 info
->fix
.mmio_len
= 0;
351 drm_fb_helper_release_fbi(helper
);
353 virtio_gpu_cmd_resource_inval_backing(vgdev
, resid
);
356 virtio_gpu_gem_free_object(&obj
->gem_base
);
360 static int virtio_gpu_fbdev_destroy(struct drm_device
*dev
,
361 struct virtio_gpu_fbdev
*vgfbdev
)
363 struct virtio_gpu_framebuffer
*vgfb
= &vgfbdev
->vgfb
;
365 drm_fb_helper_unregister_fbi(&vgfbdev
->helper
);
366 drm_fb_helper_release_fbi(&vgfbdev
->helper
);
370 drm_fb_helper_fini(&vgfbdev
->helper
);
371 drm_framebuffer_cleanup(&vgfb
->base
);
375 static struct drm_fb_helper_funcs virtio_gpu_fb_helper_funcs
= {
376 .fb_probe
= virtio_gpufb_create
,
379 int virtio_gpu_fbdev_init(struct virtio_gpu_device
*vgdev
)
381 struct virtio_gpu_fbdev
*vgfbdev
;
382 int bpp_sel
= 32; /* TODO: parameter from somewhere? */
385 vgfbdev
= kzalloc(sizeof(struct virtio_gpu_fbdev
), GFP_KERNEL
);
389 vgfbdev
->vgdev
= vgdev
;
390 vgdev
->vgfbdev
= vgfbdev
;
391 INIT_DELAYED_WORK(&vgfbdev
->work
, virtio_gpu_fb_dirty_work
);
393 drm_fb_helper_prepare(vgdev
->ddev
, &vgfbdev
->helper
,
394 &virtio_gpu_fb_helper_funcs
);
395 ret
= drm_fb_helper_init(vgdev
->ddev
, &vgfbdev
->helper
,
397 VIRTIO_GPUFB_CONN_LIMIT
);
403 drm_fb_helper_single_add_all_connectors(&vgfbdev
->helper
);
404 drm_fb_helper_initial_config(&vgfbdev
->helper
, bpp_sel
);
408 void virtio_gpu_fbdev_fini(struct virtio_gpu_device
*vgdev
)
413 virtio_gpu_fbdev_destroy(vgdev
->ddev
, vgdev
->vgfbdev
);
414 kfree(vgdev
->vgfbdev
);
415 vgdev
->vgfbdev
= NULL
;