// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 * Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include "xen_drm_front_gem.h"

#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_probe_helper.h>

#include <linux/dma-buf.h>
#include <linux/scatterlist.h>
#include <linux/shmem_fs.h>

#include <xen/balloon.h>

#include "xen_drm_front.h"
struct xen_gem_object {
	struct drm_gem_object base;

	size_t num_pages;
	struct page **pages;

	/* set for buffers allocated by the backend */
	bool be_alloc;

	/* this is for imported PRIME buffer */
	struct sg_table *sgt_imported;
};
static inline struct xen_gem_object *
to_xen_gem_obj(struct drm_gem_object *gem_obj)
{
	return container_of(gem_obj, struct xen_gem_object, base);
}
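
/*
 * Size and allocate the pages array for a buf_size-byte buffer.
 * kvmalloc_array() is used so that large arrays transparently fall back
 * to vmalloc when a contiguous kmalloc allocation cannot be satisfied.
 */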
static int gem_alloc_pages_array(struct xen_gem_object *xen_obj,
				 size_t buf_size)
{
	xen_obj->num_pages = DIV_ROUND_UP(buf_size, PAGE_SIZE);
	xen_obj->pages = kvmalloc_array(xen_obj->num_pages,
					sizeof(struct page *), GFP_KERNEL);
	return !xen_obj->pages ? -ENOMEM : 0;
}
static void gem_free_pages_array(struct xen_gem_object *xen_obj)
{
	kvfree(xen_obj->pages);
	xen_obj->pages = NULL;
}
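
/*
 * Allocate the wrapper object and initialize the embedded GEM object;
 * if drm_gem_object_init() fails, the wrapper is freed and the error
 * is propagated as an ERR_PTR.
 */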
static struct xen_gem_object *gem_create_obj(struct drm_device *dev,
					     size_t size)
{
	struct xen_gem_object *xen_obj;
	int ret;

	xen_obj = kzalloc(sizeof(*xen_obj), GFP_KERNEL);
	if (!xen_obj)
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_object_init(dev, &xen_obj->base, size);
	if (ret < 0) {
		kfree(xen_obj);
		return ERR_PTR(ret);
	}

	return xen_obj;
}
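
/*
 * Two allocation strategies, selected by the backend configuration:
 * if the backend allocates the buffer (cfg.be_alloc), only the pages
 * array and ballooned pages are set up here, so grant references
 * provided by the backend can later be mapped into them; otherwise
 * the backing pages are allocated locally (shmem-backed, via
 * drm_gem_get_pages()) so they can be shared with the backend.
 */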
static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_gem_object *xen_obj;
	int ret;

	size = round_up(size, PAGE_SIZE);
	xen_obj = gem_create_obj(dev, size);
	if (IS_ERR_OR_NULL(xen_obj))
		return xen_obj;

	if (drm_info->front_info->cfg.be_alloc) {
		/*
		 * backend will allocate space for this buffer, so
		 * only allocate array of pointers to pages
		 */
		ret = gem_alloc_pages_array(xen_obj, size);
		if (ret < 0)
			goto fail;

		/*
		 * allocate ballooned pages which will be used to map
		 * grant references provided by the backend
		 */
		ret = alloc_xenballooned_pages(xen_obj->num_pages,
					       xen_obj->pages);
		if (ret < 0) {
			DRM_ERROR("Cannot allocate %zu ballooned pages: %d\n",
				  xen_obj->num_pages, ret);
			gem_free_pages_array(xen_obj);
			goto fail;
		}

		xen_obj->be_alloc = true;
		return xen_obj;
	}

	/*
	 * need to allocate backing pages now, so we can share those
	 * with the backend
	 */
	xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
	xen_obj->pages = drm_gem_get_pages(&xen_obj->base);
	if (IS_ERR_OR_NULL(xen_obj->pages)) {
		ret = PTR_ERR(xen_obj->pages);
		xen_obj->pages = NULL;
		goto fail;
	}

	return xen_obj;

fail:
	DRM_ERROR("Failed to allocate buffer with size %zu\n", size);
	return ERR_PTR(ret);
}
struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev,
						size_t size)
{
	struct xen_gem_object *xen_obj;

	xen_obj = gem_create(dev, size);
	if (IS_ERR_OR_NULL(xen_obj))
		return ERR_CAST(xen_obj);

	return &xen_obj->base;
}
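
/*
 * A minimal sketch of how these helpers are expected to be wired into
 * the frontend's struct drm_driver (field names from the DRM core of
 * this kernel generation; the exact wiring lives in xen_drm_front.c):
 *
 *	.gem_free_object_unlocked = xen_drm_front_gem_free_object_unlocked,
 *	.gem_prime_get_sg_table = xen_drm_front_gem_get_sg_table,
 *	.gem_prime_import_sg_table = xen_drm_front_gem_import_sg_table,
 *	.gem_prime_vmap = xen_drm_front_gem_prime_vmap,
 *	.gem_prime_vunmap = xen_drm_front_gem_prime_vunmap,
 *	.gem_prime_mmap = xen_drm_front_gem_prime_mmap,
 */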
void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (xen_obj->base.import_attach) {
		drm_prime_gem_destroy(&xen_obj->base, xen_obj->sgt_imported);
		gem_free_pages_array(xen_obj);
	} else {
		if (xen_obj->pages) {
			if (xen_obj->be_alloc) {
				free_xenballooned_pages(xen_obj->num_pages,
							xen_obj->pages);
				gem_free_pages_array(xen_obj);
			} else {
				drm_gem_put_pages(&xen_obj->base,
						  xen_obj->pages, true, false);
			}
		}
	}
	drm_gem_object_release(gem_obj);
	kfree(xen_obj);
}
struct page **xen_drm_front_gem_get_pages(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	return xen_obj->pages;
}
struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (!xen_obj->pages)
		return ERR_PTR(-ENOMEM);

	return drm_prime_pages_to_sg(xen_obj->pages, xen_obj->num_pages);
}
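
/*
 * PRIME import path: wrap the caller-provided sg_table, fill the pages
 * array from it, then advertise the buffer to the backend with a cookie
 * derived from the GEM object, so both ends refer to the same display
 * buffer.
 */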
struct drm_gem_object *
xen_drm_front_gem_import_sg_table(struct drm_device *dev,
				  struct dma_buf_attachment *attach,
				  struct sg_table *sgt)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_gem_object *xen_obj;
	size_t size;
	int ret;

	size = attach->dmabuf->size;
	xen_obj = gem_create_obj(dev, size);
	if (IS_ERR_OR_NULL(xen_obj))
		return ERR_CAST(xen_obj);

	ret = gem_alloc_pages_array(xen_obj, size);
	if (ret < 0)
		return ERR_PTR(ret);

	xen_obj->sgt_imported = sgt;

	ret = drm_prime_sg_to_page_addr_arrays(sgt, xen_obj->pages,
					       NULL, xen_obj->num_pages);
	if (ret < 0)
		return ERR_PTR(ret);

	ret = xen_drm_front_dbuf_create(drm_info->front_info,
					xen_drm_front_dbuf_to_cookie(&xen_obj->base),
					0, 0, 0, size, xen_obj->pages);
	if (ret < 0)
		return ERR_PTR(ret);

	DRM_DEBUG("Imported buffer of size %zu with nents %u\n",
		  size, sgt->nents);

	return &xen_obj->base;
}
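
/*
 * Common mapping helper shared by the file-based mmap path
 * (xen_drm_front_gem_mmap) and the PRIME mmap path
 * (xen_drm_front_gem_prime_mmap) below.
 */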
static int gem_mmap_obj(struct xen_gem_object *xen_obj,
			struct vm_area_struct *vma)
{
	unsigned long addr = vma->vm_start;
	int i;

	/*
	 * clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
	 * the whole buffer.
	 */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_pgoff = 0;

	/*
	 * According to Xen on ARM ABI (xen/include/public/arch-arm.h):
	 * all memory which is shared with other entities in the system
	 * (including the hypervisor and other guests) must reside in memory
	 * which is mapped as Normal Inner Write-Back Outer Write-Back
	 * Inner-Shareable.
	 */
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

	/*
	 * vm_operations_struct.fault handler will be called if CPU access
	 * to VM is here. For GPUs this isn't the case, because CPU
	 * doesn't touch the memory. Insert pages now, so both CPU and GPU are
	 * happy.
	 * FIXME: as we insert all the pages now then no .fault handler must
	 * be called, so don't provide one
	 */
	for (i = 0; i < xen_obj->num_pages; i++) {
		int ret;

		ret = vm_insert_page(vma, addr, xen_obj->pages[i]);
		if (ret < 0) {
			DRM_ERROR("Failed to insert pages into vma: %d\n", ret);
			return ret;
		}

		addr += PAGE_SIZE;
	}
	return 0;
}
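
/*
 * mmap entry point for userspace, presumably reached through the .mmap
 * hook of the driver's file_operations: drm_gem_mmap() resolves the
 * fake offset to a GEM object and performs generic setup, then
 * gem_mmap_obj() inserts the actual pages.
 */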
int xen_drm_front_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct xen_gem_object *xen_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret < 0)
		return ret;

	gem_obj = vma->vm_private_data;
	xen_obj = to_xen_gem_obj(gem_obj);
	return gem_mmap_obj(xen_obj, vma);
}
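
/*
 * Map the buffer pages into a contiguous kernel virtual range. The
 * resulting mapping uses PAGE_KERNEL attributes; see the comment in
 * gem_mmap_obj on why Write-Back attributes are required for memory
 * shared with other Xen domains.
 */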
void *xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (!xen_obj->pages)
		return NULL;

	/* Please see comment in gem_mmap_obj on mapping and attributes. */
	return vmap(xen_obj->pages, xen_obj->num_pages,
		    VM_MAP, PAGE_KERNEL);
}
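
/* Counterpart of xen_drm_front_gem_prime_vmap(); tears down the mapping. */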
void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj,
				    void *vaddr)
{
	vunmap(vaddr);
}
int xen_drm_front_gem_prime_mmap(struct drm_gem_object *gem_obj,
				 struct vm_area_struct *vma)
{
	struct xen_gem_object *xen_obj;
	int ret;

	ret = drm_gem_mmap_obj(gem_obj, gem_obj->size, vma);
	if (ret < 0)
		return ret;

	xen_obj = to_xen_gem_obj(gem_obj);
	return gem_mmap_obj(xen_obj, vma);
}