// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 * Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <linux/dma-buf.h>
#include <linux/scatterlist.h>
#include <linux/shmem_fs.h>

#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
#include <drm/drm_probe_helper.h>

#include <xen/balloon.h>

#include "xen_drm_front.h"
#include "xen_drm_front_gem.h"

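/*
 * Per-object bookkeeping: the embedded DRM GEM object plus the array of
 * backing pages and a flag telling whether those pages were ballooned out
 * to map grant references provided by the backend or allocated by the
 * frontend itself.
 */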
struct xen_gem_object {
	struct drm_gem_object base;

	size_t num_pages;
	struct page **pages;

	/* set for buffers allocated by the backend */
	bool be_alloc;

	/* this is for imported PRIME buffer */
	struct sg_table *sgt_imported;
};

static inline struct xen_gem_object *
to_xen_gem_obj(struct drm_gem_object *gem_obj)
{
	return container_of(gem_obj, struct xen_gem_object, base);
}

static int gem_alloc_pages_array(struct xen_gem_object *xen_obj,
				 size_t buf_size)
{
	xen_obj->num_pages = DIV_ROUND_UP(buf_size, PAGE_SIZE);
	xen_obj->pages = kvmalloc_array(xen_obj->num_pages,
					sizeof(struct page *), GFP_KERNEL);
	return !xen_obj->pages ? -ENOMEM : 0;
}

static void gem_free_pages_array(struct xen_gem_object *xen_obj)
{
	kvfree(xen_obj->pages);
	xen_obj->pages = NULL;
}

static struct xen_gem_object *gem_create_obj(struct drm_device *dev,
					     size_t size)
{
	struct xen_gem_object *xen_obj;
	int ret;

	xen_obj = kzalloc(sizeof(*xen_obj), GFP_KERNEL);
	if (!xen_obj)
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_object_init(dev, &xen_obj->base, size);
	if (ret < 0) {
		kfree(xen_obj);
		return ERR_PTR(ret);
	}

	return xen_obj;
}

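/*
 * Allocate a GEM object of @size bytes, rounded up to full pages. When the
 * backend is configured to allocate buffers (be_alloc), only ballooned
 * placeholder pages are set up here; otherwise the frontend allocates real
 * backing pages that will later be shared with the backend.
 */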
static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_gem_object *xen_obj;
	int ret;

	size = round_up(size, PAGE_SIZE);
	xen_obj = gem_create_obj(dev, size);
	if (IS_ERR_OR_NULL(xen_obj))
		return xen_obj;

	if (drm_info->front_info->cfg.be_alloc) {
		/*
		 * backend will allocate space for this buffer, so
		 * only allocate array of pointers to pages
		 */
		ret = gem_alloc_pages_array(xen_obj, size);
		if (ret < 0)
			goto fail;

		/*
		 * allocate ballooned pages which will be used to map
		 * grant references provided by the backend
		 */
		ret = alloc_xenballooned_pages(xen_obj->num_pages,
					       xen_obj->pages);
		if (ret < 0) {
			DRM_ERROR("Cannot allocate %zu ballooned pages: %d\n",
				  xen_obj->num_pages, ret);
			gem_free_pages_array(xen_obj);
			goto fail;
		}

		xen_obj->be_alloc = true;
		return xen_obj;
	}

	/*
	 * need to allocate backing pages now, so we can share those
	 * with the backend
	 */
	xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
	xen_obj->pages = drm_gem_get_pages(&xen_obj->base);
	if (IS_ERR_OR_NULL(xen_obj->pages)) {
		ret = PTR_ERR(xen_obj->pages);
		xen_obj->pages = NULL;
		goto fail;
	}

	return xen_obj;

fail:
	DRM_ERROR("Failed to allocate buffer with size %zu\n", size);
	return ERR_PTR(ret);
}

struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev,
						size_t size)
{
	struct xen_gem_object *xen_obj;

	xen_obj = gem_create(dev, size);
	if (IS_ERR_OR_NULL(xen_obj))
		return ERR_CAST(xen_obj);

	return &xen_obj->base;
}

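/*
 * Release a GEM object: imported PRIME buffers only drop the page array,
 * ballooned pages are returned to the balloon, and locally allocated pages
 * are released back to shmem before the GEM object itself is freed.
 */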
void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (xen_obj->base.import_attach) {
		drm_prime_gem_destroy(&xen_obj->base, xen_obj->sgt_imported);
		gem_free_pages_array(xen_obj);
	} else {
		if (xen_obj->pages) {
			if (xen_obj->be_alloc) {
				free_xenballooned_pages(xen_obj->num_pages,
							xen_obj->pages);
				gem_free_pages_array(xen_obj);
			} else {
				drm_gem_put_pages(&xen_obj->base,
						  xen_obj->pages, true, false);
			}
		}
	}
	drm_gem_object_release(gem_obj);
	kfree(xen_obj);
}

struct page **xen_drm_front_gem_get_pages(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	return xen_obj->pages;
}

struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (!xen_obj->pages)
		return ERR_PTR(-ENOMEM);

	return drm_prime_pages_to_sg(xen_obj->pages, xen_obj->num_pages);
}

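/*
 * Import a dma-buf exported by another driver: collect its pages into the
 * local page array and tell the backend about the new display buffer,
 * using the GEM object pointer as the buffer cookie.
 */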
struct drm_gem_object *
xen_drm_front_gem_import_sg_table(struct drm_device *dev,
				  struct dma_buf_attachment *attach,
				  struct sg_table *sgt)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_gem_object *xen_obj;
	size_t size;
	int ret;

	size = attach->dmabuf->size;
	xen_obj = gem_create_obj(dev, size);
	if (IS_ERR_OR_NULL(xen_obj))
		return ERR_CAST(xen_obj);

	ret = gem_alloc_pages_array(xen_obj, size);
	if (ret < 0)
		return ERR_PTR(ret);

	xen_obj->sgt_imported = sgt;

	ret = drm_prime_sg_to_page_addr_arrays(sgt, xen_obj->pages,
					       NULL, xen_obj->num_pages);
	if (ret < 0)
		return ERR_PTR(ret);

	ret = xen_drm_front_dbuf_create(drm_info->front_info,
					xen_drm_front_dbuf_to_cookie(&xen_obj->base),
					0, 0, 0, size, xen_obj->pages);
	if (ret < 0)
		return ERR_PTR(ret);

	DRM_DEBUG("Imported buffer of size %zu with nents %u\n",
		  size, sgt->nents);

	return &xen_obj->base;
}

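/*
 * Common VMA setup used by both the file-based and the PRIME mmap paths:
 * all backing pages are inserted into the VMA up front, so no .fault
 * handler is needed afterwards.
 */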
static int gem_mmap_obj(struct xen_gem_object *xen_obj,
			struct vm_area_struct *vma)
{
	int ret;

	/*
	 * clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
	 * the whole buffer.
	 */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_pgoff = 0;

	/*
	 * According to Xen on ARM ABI (xen/include/public/arch-arm.h):
	 * all memory which is shared with other entities in the system
	 * (including the hypervisor and other guests) must reside in memory
	 * which is mapped as Normal Inner Write-Back Outer Write-Back
	 * Inner-Shareable.
	 */
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

	/*
	 * vm_operations_struct.fault handler will be called if CPU access
	 * to VM is here. For GPUs this isn't the case, because CPU
	 * doesn't touch the memory. Insert pages now, so both CPU and GPU are
	 * happy.
	 * FIXME: as we insert all the pages now then no .fault handler must
	 * be called, so don't provide one
	 */
	ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages);
	if (ret < 0)
		DRM_ERROR("Failed to map pages into vma: %d\n", ret);

	return ret;
}

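/* mmap handler for the DRM file: let DRM set up the VMA, then insert the pages. */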
int xen_drm_front_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct xen_gem_object *xen_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret < 0)
		return ret;

	gem_obj = vma->vm_private_data;
	xen_obj = to_xen_gem_obj(gem_obj);
	return gem_mmap_obj(xen_obj, vma);
}

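/* Map the buffer contiguously into kernel address space for dma-buf vmap. */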
void *xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (!xen_obj->pages)
		return NULL;

	/* Please see comment in gem_mmap_obj on mapping and attributes. */
	return vmap(xen_obj->pages, xen_obj->num_pages,
		    VM_MAP, PAGE_KERNEL);
}

void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj,
				    void *vaddr)
{
	vunmap(vaddr);
}

int xen_drm_front_gem_prime_mmap(struct drm_gem_object *gem_obj,
				 struct vm_area_struct *vma)
{
	struct xen_gem_object *xen_obj;
	int ret;

	ret = drm_gem_mmap_obj(gem_obj, gem_obj->size, vma);
	if (ret < 0)
		return ret;

	xen_obj = to_xen_gem_obj(gem_obj);
	return gem_mmap_obj(xen_obj, vma);
}