/*
 * Copyright (C) 2015 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>

#include "virtgpu_drv.h"
static int virtio_gpu_virglrenderer_workaround = 1;
module_param_named(virglhack, virtio_gpu_virglrenderer_workaround, int, 0400);
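
/*
 * Allocate a host-visible resource ID for a new object.  While the
 * virglrenderer workaround is enabled (the default), IDs come from a
 * monotonically increasing counter and are never reused; otherwise
 * they are taken from the per-device IDA.
 */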
int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
                               uint32_t *resid)
{
        if (virtio_gpu_virglrenderer_workaround) {
                /*
                 * Hack to avoid re-using resource IDs.
                 *
                 * virglrenderer versions up to (and including) 0.7.0
                 * can't deal with that. virglrenderer commit
                 * "f91a9dd35715 Fix unlinking resources from hash
                 * table." (Feb 2019) fixes the bug.
                 */
                static atomic_t seqno = ATOMIC_INIT(0);
                int handle = atomic_inc_return(&seqno);

                *resid = handle + 1;
        } else {
                int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);

                if (handle < 0)
                        return handle;
                *resid = handle + 1;
        }
        return 0;
}
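
/* Return a resource ID to the IDA; a no-op while the workaround is active. */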
static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev,
                                       uint32_t id)
{
        if (!virtio_gpu_virglrenderer_workaround) {
                ida_free(&vgdev->resource_ida, id - 1);
        }
}
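
/*
 * Final teardown of an object once the host no longer references it:
 * release the resource ID and free the shmem backing pages (unmapping
 * them first if they were DMA-mapped) or the host-visible VRAM node.
 */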
void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
{
        struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;

        virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
        if (virtio_gpu_is_shmem(bo)) {
                struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

                if (shmem->pages) {
                        if (shmem->mapped) {
                                dma_unmap_sgtable(vgdev->vdev->dev.parent,
                                                  shmem->pages, DMA_TO_DEVICE, 0);
                                shmem->mapped = 0;
                        }

                        sg_free_table(shmem->pages);
                        kfree(shmem->pages);
                        shmem->pages = NULL;
                        drm_gem_shmem_unpin(&bo->base.base);
                }

                drm_gem_shmem_free_object(&bo->base.base);
        } else if (virtio_gpu_is_vram(bo)) {
                struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);

                spin_lock(&vgdev->host_visible_lock);
                if (drm_mm_node_allocated(&vram->vram_node))
                        drm_mm_remove_node(&vram->vram_node);
                spin_unlock(&vgdev->host_visible_lock);

                drm_gem_free_mmap_offset(&vram->base.base.base);
                drm_gem_object_release(&vram->base.base.base);
                kfree(vram);
        }
}
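
/*
 * GEM .free callback.  If the resource was created on the host, queue an
 * unref command and let its completion handler do the cleanup; otherwise
 * the object can be cleaned up immediately.
 */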
static void virtio_gpu_free_object(struct drm_gem_object *obj)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
        struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;

        if (bo->created) {
                virtio_gpu_cmd_unref_resource(vgdev, bo);
                virtio_gpu_notify(vgdev);
                /* completion handler calls virtio_gpu_cleanup_object() */
                return;
        }
        virtio_gpu_cleanup_object(bo);
}
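
/* GEM object callbacks for shmem-backed virtio-gpu objects. */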
static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = {
        .free = virtio_gpu_free_object,
        .open = virtio_gpu_gem_object_open,
        .close = virtio_gpu_gem_object_close,

        .print_info = drm_gem_shmem_print_info,
        .export = virtgpu_gem_prime_export,
        .pin = drm_gem_shmem_pin,
        .unpin = drm_gem_shmem_unpin,
        .get_sg_table = drm_gem_shmem_get_sg_table,
        .vmap = drm_gem_shmem_vmap,
        .vunmap = drm_gem_shmem_vunmap,
        .mmap = drm_gem_shmem_mmap,
};
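
/* An object is shmem-backed when it was created with the shmem function table. */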
bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo)
{
        return bo->base.base.funcs == &virtio_gpu_shmem_funcs;
}
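
/*
 * DRM gem_create_object hook: allocate the driver-private wrapper and
 * install the virtio-gpu GEM function table before the shmem helpers
 * finish initializing the object.
 */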
struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
                                                size_t size)
{
        struct virtio_gpu_object_shmem *shmem;
        struct drm_gem_shmem_object *dshmem;

        shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
        if (!shmem)
                return NULL;

        dshmem = &shmem->base.base;
        dshmem->base.funcs = &virtio_gpu_shmem_funcs;
        return &dshmem->base;
}
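
/*
 * Pin the shmem pages, build a scatter/gather table for them and convert
 * it into the virtio_gpu_mem_entry array the host expects.  With the DMA
 * API in use the entries carry DMA addresses, otherwise physical addresses.
 */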
static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
                                        struct virtio_gpu_object *bo,
                                        struct virtio_gpu_mem_entry **ents,
                                        unsigned int *nents)
{
        bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
        struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
        struct scatterlist *sg;
        int si, ret;

        ret = drm_gem_shmem_pin(&bo->base.base);
        if (ret < 0)
                return -EINVAL;

        /*
         * virtio_gpu uses drm_gem_shmem_get_sg_table instead of
         * drm_gem_shmem_get_pages_sgt because virtio has its own set of
         * dma-ops. This is discouraged for other drivers, but should be fine
         * since virtio_gpu doesn't support dma-buf import from other devices.
         */
        shmem->pages = drm_gem_shmem_get_sg_table(&bo->base.base);
        if (!shmem->pages) {
                drm_gem_shmem_unpin(&bo->base.base);
                return -EINVAL;
        }

        if (use_dma_api) {
                ret = dma_map_sgtable(vgdev->vdev->dev.parent,
                                      shmem->pages, DMA_TO_DEVICE, 0);
                if (ret)
                        return ret;
                *nents = shmem->mapped = shmem->pages->nents;
        } else {
                *nents = shmem->pages->orig_nents;
        }

        *ents = kvmalloc_array(*nents,
                               sizeof(struct virtio_gpu_mem_entry),
                               GFP_KERNEL);
        if (!(*ents)) {
                DRM_ERROR("failed to allocate ent list\n");
                return -ENOMEM;
        }

        if (use_dma_api) {
                for_each_sgtable_dma_sg(shmem->pages, sg, si) {
                        (*ents)[si].addr = cpu_to_le64(sg_dma_address(sg));
                        (*ents)[si].length = cpu_to_le32(sg_dma_len(sg));
                        (*ents)[si].padding = 0;
                }
        } else {
                for_each_sgtable_sg(shmem->pages, sg, si) {
                        (*ents)[si].addr = cpu_to_le64(sg_phys(sg));
                        (*ents)[si].length = cpu_to_le32(sg->length);
                        (*ents)[si].padding = 0;
                }
        }

        return 0;
}
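
/*
 * Create a virtio-gpu buffer object: allocate the GEM shmem object and a
 * resource ID, set up the backing pages, then issue the matching resource
 * create command (blob, 3D or 2D) to the host.
 */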
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
                             struct virtio_gpu_object_params *params,
                             struct virtio_gpu_object **bo_ptr,
                             struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_object_array *objs = NULL;
        struct drm_gem_shmem_object *shmem_obj;
        struct virtio_gpu_object *bo;
        struct virtio_gpu_mem_entry *ents;
        unsigned int nents;
        int ret;

        *bo_ptr = NULL;

        params->size = roundup(params->size, PAGE_SIZE);
        shmem_obj = drm_gem_shmem_create(vgdev->ddev, params->size);
        if (IS_ERR(shmem_obj))
                return PTR_ERR(shmem_obj);
        bo = gem_to_virtio_gpu_obj(&shmem_obj->base);

        ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
        if (ret < 0)
                goto err_free_gem;

        bo->dumb = params->dumb;

        if (fence) {
                ret = -ENOMEM;
                objs = virtio_gpu_array_alloc(1);
                if (!objs)
                        goto err_put_id;
                virtio_gpu_array_add_obj(objs, &bo->base.base);

                ret = virtio_gpu_array_lock_resv(objs);
                if (ret != 0)
                        goto err_put_objs;
        }

        ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
        if (ret != 0) {
                virtio_gpu_free_object(&shmem_obj->base);
                return ret;
        }

        if (params->blob) {
                virtio_gpu_cmd_resource_create_blob(vgdev, bo, params,
                                                    ents, nents);
        } else if (params->virgl) {
                virtio_gpu_cmd_resource_create_3d(vgdev, bo, params,
                                                  objs, fence);
                virtio_gpu_object_attach(vgdev, bo, ents, nents);
        } else {
                virtio_gpu_cmd_create_resource(vgdev, bo, params,
                                               objs, fence);
                virtio_gpu_object_attach(vgdev, bo, ents, nents);
        }

        *bo_ptr = bo;
        return 0;

err_put_objs:
        virtio_gpu_array_put_free(objs);
err_put_id:
        virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
err_free_gem:
        drm_gem_shmem_free_object(&shmem_obj->base);
        return ret;
}