// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 *  Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <linux/dma-buf.h>
#include <linux/scatterlist.h>
#include <linux/shmem_fs.h>

#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
#include <drm/drm_probe_helper.h>

#include <xen/balloon.h>
#include <xen/xen.h>

#include "xen_drm_front.h"
#include "xen_drm_front_gem.h"

struct xen_gem_object {
	struct drm_gem_object base;

	size_t num_pages;
	struct page **pages;

	/* set for buffers allocated by the backend */
	bool be_alloc;

	/* this is for imported PRIME buffer */
	struct sg_table *sgt_imported;
};

static inline struct xen_gem_object *
to_xen_gem_obj(struct drm_gem_object *gem_obj)
{
	return container_of(gem_obj, struct xen_gem_object, base);
}

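/*
 * The two helpers below only manage the array of page pointers: the
 * backing pages themselves come either from xen_alloc_unpopulated_pages()
 * for backend-allocated buffers or from shmem via drm_gem_get_pages().
 */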
static int gem_alloc_pages_array(struct xen_gem_object *xen_obj,
				 size_t buf_size)
{
	xen_obj->num_pages = DIV_ROUND_UP(buf_size, PAGE_SIZE);
	xen_obj->pages = kvmalloc_array(xen_obj->num_pages,
					sizeof(struct page *), GFP_KERNEL);
	return !xen_obj->pages ? -ENOMEM : 0;
}

static void gem_free_pages_array(struct xen_gem_object *xen_obj)
{
	kvfree(xen_obj->pages);
	xen_obj->pages = NULL;
}

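/*
 * No .fault handler is provided on purpose: all pages are inserted into
 * the VMA at mmap time (see gem_mmap_obj()), so only the generic GEM
 * open/close reference helpers are needed.
 */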
static const struct vm_operations_struct xen_drm_drv_vm_ops = {
	.open           = drm_gem_vm_open,
	.close          = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs xen_drm_front_gem_object_funcs = {
	.free = xen_drm_front_gem_object_free,
	.get_sg_table = xen_drm_front_gem_get_sg_table,
	.vmap = xen_drm_front_gem_prime_vmap,
	.vunmap = xen_drm_front_gem_prime_vunmap,
	.vm_ops = &xen_drm_drv_vm_ops,
};

static struct xen_gem_object *gem_create_obj(struct drm_device *dev,
					     size_t size)
{
	struct xen_gem_object *xen_obj;
	int ret;

	xen_obj = kzalloc(sizeof(*xen_obj), GFP_KERNEL);
	if (!xen_obj)
		return ERR_PTR(-ENOMEM);

	xen_obj->base.funcs = &xen_drm_front_gem_object_funcs;

	ret = drm_gem_object_init(dev, &xen_obj->base, size);
	if (ret < 0) {
		kfree(xen_obj);
		return ERR_PTR(ret);
	}

	return xen_obj;
}

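/*
 * Two allocation strategies: when the backend owns the buffer memory
 * (cfg.be_alloc), only ballooned/unpopulated pages are allocated here so
 * the backend's grant references can be mapped into them; otherwise real
 * shmem-backed pages are allocated and later shared with the backend.
 */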
static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_gem_object *xen_obj;
	int ret;

	size = round_up(size, PAGE_SIZE);
	xen_obj = gem_create_obj(dev, size);
	if (IS_ERR(xen_obj))
		return xen_obj;

	if (drm_info->front_info->cfg.be_alloc) {
		/*
		 * backend will allocate space for this buffer, so
		 * only allocate array of pointers to pages
		 */
		ret = gem_alloc_pages_array(xen_obj, size);
		if (ret < 0)
			goto fail;

		/*
		 * allocate ballooned pages which will be used to map
		 * grant references provided by the backend
		 */
		ret = xen_alloc_unpopulated_pages(xen_obj->num_pages,
						  xen_obj->pages);
		if (ret < 0) {
			DRM_ERROR("Cannot allocate %zu ballooned pages: %d\n",
				  xen_obj->num_pages, ret);
			gem_free_pages_array(xen_obj);
			goto fail;
		}

		xen_obj->be_alloc = true;
		return xen_obj;
	}

	/*
	 * need to allocate backing pages now, so we can share those
	 * with the backend
	 */
	xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
	xen_obj->pages = drm_gem_get_pages(&xen_obj->base);
	if (IS_ERR(xen_obj->pages)) {
		ret = PTR_ERR(xen_obj->pages);
		xen_obj->pages = NULL;
		goto fail;
	}

	return xen_obj;

fail:
	DRM_ERROR("Failed to allocate buffer with size %zu\n", size);
	return ERR_PTR(ret);
}

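/*
 * Exported wrapper around gem_create(), e.g. for the frontend's dumb
 * buffer creation path; returns the embedded GEM object.
 */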
struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev,
						size_t size)
{
	struct xen_gem_object *xen_obj;

	xen_obj = gem_create(dev, size);
	if (IS_ERR(xen_obj))
		return ERR_CAST(xen_obj);

	return &xen_obj->base;
}

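/*
 * Teardown mirrors the three possible backing stores: imported PRIME
 * buffers drop the sg_table and the page array, backend-allocated
 * buffers return their unpopulated pages, and locally allocated buffers
 * put their shmem pages back.
 */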
void xen_drm_front_gem_object_free(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (xen_obj->base.import_attach) {
		drm_prime_gem_destroy(&xen_obj->base, xen_obj->sgt_imported);
		gem_free_pages_array(xen_obj);
	} else {
		if (xen_obj->pages) {
			if (xen_obj->be_alloc) {
				xen_free_unpopulated_pages(xen_obj->num_pages,
							   xen_obj->pages);
				gem_free_pages_array(xen_obj);
			} else {
				drm_gem_put_pages(&xen_obj->base,
						  xen_obj->pages, true, false);
			}
		}
	}
	drm_gem_object_release(gem_obj);
	kfree(xen_obj);
}

struct page **xen_drm_front_gem_get_pages(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	return xen_obj->pages;
}

struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (!xen_obj->pages)
		return ERR_PTR(-ENOMEM);

	return drm_prime_pages_to_sg(gem_obj->dev,
				     xen_obj->pages, xen_obj->num_pages);
}

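/*
 * PRIME import: collect the pages described by the imported sg_table
 * and register them with the backend as a new display buffer, keyed by
 * the cookie derived from the GEM object.
 */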
struct drm_gem_object *
xen_drm_front_gem_import_sg_table(struct drm_device *dev,
				  struct dma_buf_attachment *attach,
				  struct sg_table *sgt)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_gem_object *xen_obj;
	size_t size;
	int ret;

	size = attach->dmabuf->size;
	xen_obj = gem_create_obj(dev, size);
	if (IS_ERR(xen_obj))
		return ERR_CAST(xen_obj);

	ret = gem_alloc_pages_array(xen_obj, size);
	if (ret < 0)
		return ERR_PTR(ret);

	xen_obj->sgt_imported = sgt;

	ret = drm_prime_sg_to_page_addr_arrays(sgt, xen_obj->pages,
					       NULL, xen_obj->num_pages);
	if (ret < 0)
		return ERR_PTR(ret);

	ret = xen_drm_front_dbuf_create(drm_info->front_info,
					xen_drm_front_dbuf_to_cookie(&xen_obj->base),
					0, 0, 0, size, sgt->sgl->offset,
					xen_obj->pages);
	if (ret < 0)
		return ERR_PTR(ret);

	DRM_DEBUG("Imported buffer of size %zu with nents %u\n",
		  size, sgt->orig_nents);

	return &xen_obj->base;
}

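/*
 * Shared mmap helper: expects a VMA already prepared by drm_gem_mmap()
 * or drm_gem_mmap_obj() and inserts all pages of the buffer up front.
 */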
static int gem_mmap_obj(struct xen_gem_object *xen_obj,
			struct vm_area_struct *vma)
{
	int ret;

	/*
	 * clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
	 * the whole buffer.
	 */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_pgoff = 0;

	/*
	 * According to Xen on ARM ABI (xen/include/public/arch-arm.h):
	 * all memory which is shared with other entities in the system
	 * (including the hypervisor and other guests) must reside in memory
	 * which is mapped as Normal Inner Write-Back Outer Write-Back
	 * Inner-Shareable.
	 */
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

	/*
	 * vm_operations_struct.fault handler will be called if CPU access
	 * to VM is here. For GPUs this isn't the case, because CPU
	 * doesn't touch the memory. Insert pages now, so both CPU and GPU are
	 * happy.
	 * FIXME: as we insert all the pages now then no .fault handler must
	 * be called, so don't provide one
	 */
	ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages);
	if (ret < 0)
		DRM_ERROR("Failed to map pages into vma: %d\n", ret);

	return ret;
}

int xen_drm_front_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct xen_gem_object *xen_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret < 0)
		return ret;

	gem_obj = vma->vm_private_data;
	xen_obj = to_xen_gem_obj(gem_obj);
	return gem_mmap_obj(xen_obj, vma);
}

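/*
 * dma-buf vmap: map the pages contiguously into kernel virtual address
 * space and publish the pointer through struct dma_buf_map.
 */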
int xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj,
				 struct dma_buf_map *map)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
	void *vaddr;

	if (!xen_obj->pages)
		return -ENOMEM;

	/* Please see comment in gem_mmap_obj on mapping and attributes. */
	vaddr = vmap(xen_obj->pages, xen_obj->num_pages,
		     VM_MAP, PAGE_KERNEL);
	if (!vaddr)
		return -ENOMEM;
	dma_buf_map_set_vaddr(map, vaddr);

	return 0;
}

void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj,
				    struct dma_buf_map *map)
{
	vunmap(map->vaddr);
}

int xen_drm_front_gem_prime_mmap(struct drm_gem_object *gem_obj,
				 struct vm_area_struct *vma)
{
	struct xen_gem_object *xen_obj;
	int ret;

	ret = drm_gem_mmap_obj(gem_obj, gem_obj->size, vma);
	if (ret < 0)
		return ret;

	xen_obj = to_xen_gem_obj(gem_obj);
	return gem_mmap_obj(xen_obj, vma);
}