// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 *  Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */
#include "xen_drm_front_gem.h"

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>

#include <linux/dma-buf.h>
#include <linux/scatterlist.h>
#include <linux/shmem_fs.h>

#include <xen/balloon.h>

#include "xen_drm_front.h"
#include "xen_drm_front_shbuf.h"
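
/*
 * A GEM object here is backed in one of two ways, depending on the
 * be_alloc configuration option: either by ballooned pages which map
 * grant references provided by the backend, or by regular shmem pages
 * allocated in the frontend and shared with the backend.
 */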
struct xen_gem_object {
	struct drm_gem_object base;

	size_t num_pages;
	struct page **pages;

	/* set for buffers allocated by the backend */
	bool be_alloc;

	/* this is for imported PRIME buffer */
	struct sg_table *sgt_imported;
};

static inline struct xen_gem_object *
to_xen_gem_obj(struct drm_gem_object *gem_obj)
{
	return container_of(gem_obj, struct xen_gem_object, base);
}
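
/*
 * Allocate only the array of page pointers; the pages themselves come
 * later, either from the balloon or from shmem. kvmalloc_array() falls
 * back to vmalloc() when the array is too large for kmalloc().
 */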
static int gem_alloc_pages_array(struct xen_gem_object *xen_obj,
				 size_t buf_size)
{
	xen_obj->num_pages = DIV_ROUND_UP(buf_size, PAGE_SIZE);
	xen_obj->pages = kvmalloc_array(xen_obj->num_pages,
					sizeof(struct page *), GFP_KERNEL);
	return !xen_obj->pages ? -ENOMEM : 0;
}

static void gem_free_pages_array(struct xen_gem_object *xen_obj)
{
	kvfree(xen_obj->pages);
	xen_obj->pages = NULL;
}

static struct xen_gem_object *gem_create_obj(struct drm_device *dev,
					     size_t size)
{
	struct xen_gem_object *xen_obj;
	int ret;

	xen_obj = kzalloc(sizeof(*xen_obj), GFP_KERNEL);
	if (!xen_obj)
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_object_init(dev, &xen_obj->base, size);
	if (ret < 0) {
		kfree(xen_obj);
		return ERR_PTR(ret);
	}

	return xen_obj;
}

static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_gem_object *xen_obj;
	int ret;

	size = round_up(size, PAGE_SIZE);
	xen_obj = gem_create_obj(dev, size);
	if (IS_ERR_OR_NULL(xen_obj))
		return xen_obj;

	if (drm_info->front_info->cfg.be_alloc) {
		/*
		 * backend will allocate space for this buffer, so
		 * only allocate array of pointers to pages
		 */
		ret = gem_alloc_pages_array(xen_obj, size);
		if (ret < 0)
			goto fail;

		/*
		 * allocate ballooned pages which will be used to map
		 * grant references provided by the backend
		 */
		ret = alloc_xenballooned_pages(xen_obj->num_pages,
					       xen_obj->pages);
		if (ret < 0) {
			DRM_ERROR("Cannot allocate %zu ballooned pages: %d\n",
				  xen_obj->num_pages, ret);
			gem_free_pages_array(xen_obj);
			goto fail;
		}

		xen_obj->be_alloc = true;
		return xen_obj;
	}
	/*
	 * need to allocate backing pages now, so we can share those
	 * with the backend
	 */
	xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
	xen_obj->pages = drm_gem_get_pages(&xen_obj->base);
	if (IS_ERR_OR_NULL(xen_obj->pages)) {
		ret = PTR_ERR(xen_obj->pages);
		xen_obj->pages = NULL;
		goto fail;
	}

	return xen_obj;

fail:
	DRM_ERROR("Failed to allocate buffer with size %zu\n", size);
	return ERR_PTR(ret);
}
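
/*
 * Entry point for buffer allocation, e.g. from the driver's dumb-buffer
 * creation path: a thin wrapper that hides struct xen_gem_object from
 * the callers.
 */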
struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev,
						size_t size)
{
	struct xen_gem_object *xen_obj;

	xen_obj = gem_create(dev, size);
	if (IS_ERR_OR_NULL(xen_obj))
		return ERR_CAST(xen_obj);

	return &xen_obj->base;
}
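
/*
 * Teardown mirrors the allocation paths: imported PRIME buffers only
 * drop the sg_table reference and the pages array, backend-allocated
 * buffers return their pages to the balloon, and frontend-allocated
 * ones are released back to shmem.
 */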
void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (xen_obj->base.import_attach) {
		drm_prime_gem_destroy(&xen_obj->base, xen_obj->sgt_imported);
		gem_free_pages_array(xen_obj);
	} else {
		if (xen_obj->pages) {
			if (xen_obj->be_alloc) {
				free_xenballooned_pages(xen_obj->num_pages,
							xen_obj->pages);
				gem_free_pages_array(xen_obj);
			} else {
				drm_gem_put_pages(&xen_obj->base,
						  xen_obj->pages, true, false);
			}
		}
	}
	drm_gem_object_release(gem_obj);
	kfree(xen_obj);
}

struct page **xen_drm_front_gem_get_pages(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	return xen_obj->pages;
}
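
/*
 * Export path for PRIME: wrap the backing pages into a freshly
 * allocated sg_table which the PRIME core then owns and frees.
 */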
struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (!xen_obj->pages)
		return ERR_PTR(-ENOMEM);

	return drm_prime_pages_to_sg(xen_obj->pages, xen_obj->num_pages);
}
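
/*
 * Import path for PRIME: create a GEM object sized to the dma-buf,
 * collect the pages backing the sg_table and register the buffer with
 * the backend, using the GEM object as a unique cookie.
 */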
struct drm_gem_object *
xen_drm_front_gem_import_sg_table(struct drm_device *dev,
				  struct dma_buf_attachment *attach,
				  struct sg_table *sgt)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_gem_object *xen_obj;
	size_t size;
	int ret;

	size = attach->dmabuf->size;
	xen_obj = gem_create_obj(dev, size);
	if (IS_ERR_OR_NULL(xen_obj))
		return ERR_CAST(xen_obj);

	ret = gem_alloc_pages_array(xen_obj, size);
	if (ret < 0)
		return ERR_PTR(ret);

	xen_obj->sgt_imported = sgt;

	ret = drm_prime_sg_to_page_addr_arrays(sgt, xen_obj->pages,
					       NULL, xen_obj->num_pages);
	if (ret < 0)
		return ERR_PTR(ret);

	ret = xen_drm_front_dbuf_create(drm_info->front_info,
					xen_drm_front_dbuf_to_cookie(&xen_obj->base),
					0, 0, 0, size, xen_obj->pages);
	if (ret < 0)
		return ERR_PTR(ret);

	DRM_DEBUG("Imported buffer of size %zu with nents %u\n",
		  size, sgt->nents);

	return &xen_obj->base;
}
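
/*
 * Map the buffer into a userspace VMA page by page: vm_insert_page()
 * cannot be used on a VM_PFNMAP mapping, hence the flag fixup below
 * before any page is inserted.
 */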
static int gem_mmap_obj(struct xen_gem_object *xen_obj,
			struct vm_area_struct *vma)
{
	unsigned long addr = vma->vm_start;
	int i;

	/*
	 * clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
	 * the whole buffer.
	 */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_pgoff = 0;
	vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/*
	 * vm_operations_struct.fault handler will be called if CPU access
	 * to VM is here. For GPUs this isn't the case, because CPU
	 * doesn't touch the memory. Insert pages now, so both CPU and GPU are
	 * happy.
	 * FIXME: as we insert all the pages now then no .fault handler must
	 * be called, so don't provide one
	 */
	for (i = 0; i < xen_obj->num_pages; i++) {
		int ret;

		ret = vm_insert_page(vma, addr, xen_obj->pages[i]);
		if (ret < 0) {
			DRM_ERROR("Failed to insert pages into vma: %d\n", ret);
			return ret;
		}

		addr += PAGE_SIZE;
	}
	return 0;
}

int xen_drm_front_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct xen_gem_object *xen_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret < 0)
		return ret;

	gem_obj = vma->vm_private_data;
	xen_obj = to_xen_gem_obj(gem_obj);
	return gem_mmap_obj(xen_obj, vma);
}

void *xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (!xen_obj->pages)
		return NULL;

	return vmap(xen_obj->pages, xen_obj->num_pages,
		    VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}

void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj,
				    void *vaddr)
{
	vunmap(vaddr);
}

int xen_drm_front_gem_prime_mmap(struct drm_gem_object *gem_obj,
				 struct vm_area_struct *vma)
{
	struct xen_gem_object *xen_obj;
	int ret;

	ret = drm_gem_mmap_obj(gem_obj, gem_obj->size, vma);
	if (ret < 0)
		return ret;

	xen_obj = to_xen_gem_obj(gem_obj);
	return gem_mmap_obj(xen_obj, vma);
}
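
/*
 * Usage sketch (illustrative only, not part of this file): these helpers
 * are typically wired into the driver's struct drm_driver roughly as
 * below; the "xen_drm_driver" and "xen_drm_dev_fops" names here are
 * assumptions, not taken from this file:
 *
 *	static struct drm_driver xen_drm_driver = {
 *		...
 *		.gem_free_object_unlocked = xen_drm_front_gem_free_object_unlocked,
 *		.gem_prime_get_sg_table = xen_drm_front_gem_get_sg_table,
 *		.gem_prime_import_sg_table = xen_drm_front_gem_import_sg_table,
 *		.gem_prime_vmap = xen_drm_front_gem_prime_vmap,
 *		.gem_prime_vunmap = xen_drm_front_gem_prime_vunmap,
 *		.gem_prime_mmap = xen_drm_front_gem_prime_mmap,
 *		.fops = &xen_drm_dev_fops,
 *	};
 *
 * with xen_drm_front_gem_mmap() plugged into the file_operations .mmap hook.
 */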