// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 * Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include "xen_drm_front_gem.h"

#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_probe_helper.h>

#include <linux/dma-buf.h>
#include <linux/scatterlist.h>
#include <linux/shmem_fs.h>

#include <xen/balloon.h>

#include "xen_drm_front.h"
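
/*
 * Per-buffer driver state: wraps the base DRM GEM object and tracks the
 * backing pages, together with whether they were ballooned out for a
 * backend allocation or belong to an imported PRIME buffer.
 */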
struct xen_gem_object {
	struct drm_gem_object base;

	size_t num_pages;
	struct page **pages;

	/* set for buffers allocated by the backend */
	bool be_alloc;

	/* this is for imported PRIME buffer */
	struct sg_table *sgt_imported;
};

static inline struct xen_gem_object *
to_xen_gem_obj(struct drm_gem_object *gem_obj)
{
	return container_of(gem_obj, struct xen_gem_object, base);
}

static int gem_alloc_pages_array(struct xen_gem_object *xen_obj,
				 size_t buf_size)
{
	xen_obj->num_pages = DIV_ROUND_UP(buf_size, PAGE_SIZE);
	xen_obj->pages = kvmalloc_array(xen_obj->num_pages,
					sizeof(struct page *), GFP_KERNEL);
	return !xen_obj->pages ? -ENOMEM : 0;
}

static void gem_free_pages_array(struct xen_gem_object *xen_obj)
{
	kvfree(xen_obj->pages);
	xen_obj->pages = NULL;
}

static struct xen_gem_object *gem_create_obj(struct drm_device *dev,
					     size_t size)
{
	struct xen_gem_object *xen_obj;
	int ret;

	xen_obj = kzalloc(sizeof(*xen_obj), GFP_KERNEL);
	if (!xen_obj)
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_object_init(dev, &xen_obj->base, size);
	if (ret < 0) {
		kfree(xen_obj);
		return ERR_PTR(ret);
	}

	return xen_obj;
}
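
/*
 * Allocate a GEM object of @size bytes. If the backend is configured to
 * allocate buffers (cfg.be_alloc), only ballooned pages are reserved so
 * that grant references provided by the backend can be mapped into them;
 * otherwise backing pages are allocated locally and shared with the
 * backend later.
 */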
static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_gem_object *xen_obj;
	int ret;

	size = round_up(size, PAGE_SIZE);
	xen_obj = gem_create_obj(dev, size);
	if (IS_ERR_OR_NULL(xen_obj))
		return xen_obj;

	if (drm_info->front_info->cfg.be_alloc) {
		/*
		 * backend will allocate space for this buffer, so
		 * only allocate array of pointers to pages
		 */
		ret = gem_alloc_pages_array(xen_obj, size);
		if (ret < 0)
			goto fail;

		/*
		 * allocate ballooned pages which will be used to map
		 * grant references provided by the backend
		 */
		ret = alloc_xenballooned_pages(xen_obj->num_pages,
					       xen_obj->pages);
		if (ret < 0) {
			DRM_ERROR("Cannot allocate %zu ballooned pages: %d\n",
				  xen_obj->num_pages, ret);
			gem_free_pages_array(xen_obj);
			goto fail;
		}

		xen_obj->be_alloc = true;
		return xen_obj;
	}

	/*
	 * need to allocate backing pages now, so we can share those
	 * with the backend
	 */
	xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
	xen_obj->pages = drm_gem_get_pages(&xen_obj->base);
	if (IS_ERR_OR_NULL(xen_obj->pages)) {
		ret = PTR_ERR(xen_obj->pages);
		xen_obj->pages = NULL;
		goto fail;
	}

	return xen_obj;

fail:
	DRM_ERROR("Failed to allocate buffer with size %zu\n", size);
	return ERR_PTR(ret);
}

struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev,
						size_t size)
{
	struct xen_gem_object *xen_obj;

	xen_obj = gem_create(dev, size);
	if (IS_ERR_OR_NULL(xen_obj))
		return ERR_CAST(xen_obj);

	return &xen_obj->base;
}
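
/*
 * Release a GEM object: an imported PRIME buffer is destroyed together
 * with its page array, backend-allocated pages are returned to the
 * balloon, and locally allocated shmem pages are put back.
 */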
void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (xen_obj->base.import_attach) {
		drm_prime_gem_destroy(&xen_obj->base, xen_obj->sgt_imported);
		gem_free_pages_array(xen_obj);
	} else {
		if (xen_obj->pages) {
			if (xen_obj->be_alloc) {
				free_xenballooned_pages(xen_obj->num_pages,
							xen_obj->pages);
				gem_free_pages_array(xen_obj);
			} else {
				drm_gem_put_pages(&xen_obj->base,
						  xen_obj->pages, true, false);
			}
		}
	}

	drm_gem_object_release(gem_obj);
	kfree(xen_obj);
}

struct page **xen_drm_front_gem_get_pages(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	return xen_obj->pages;
}
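
/*
 * Build a scatter-gather table describing the backing pages; this is
 * used when exporting the buffer via PRIME.
 */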
struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (!xen_obj->pages)
		return ERR_PTR(-ENOMEM);

	return drm_prime_pages_to_sg(xen_obj->pages, xen_obj->num_pages);
}
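
/*
 * Import a PRIME buffer: collect the pages backing the dma-buf into the
 * object's page array and notify the backend about the new display
 * buffer so it can map the pages on its side.
 */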
struct drm_gem_object *
xen_drm_front_gem_import_sg_table(struct drm_device *dev,
				  struct dma_buf_attachment *attach,
				  struct sg_table *sgt)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_gem_object *xen_obj;
	size_t size;
	int ret;

	size = attach->dmabuf->size;
	xen_obj = gem_create_obj(dev, size);
	if (IS_ERR_OR_NULL(xen_obj))
		return ERR_CAST(xen_obj);

	ret = gem_alloc_pages_array(xen_obj, size);
	if (ret < 0)
		return ERR_PTR(ret);

	xen_obj->sgt_imported = sgt;

	ret = drm_prime_sg_to_page_addr_arrays(sgt, xen_obj->pages,
					       NULL, xen_obj->num_pages);
	if (ret < 0)
		return ERR_PTR(ret);

	ret = xen_drm_front_dbuf_create(drm_info->front_info,
					xen_drm_front_dbuf_to_cookie(&xen_obj->base),
					0, 0, 0, size, xen_obj->pages);
	if (ret < 0)
		return ERR_PTR(ret);

	DRM_DEBUG("Imported buffer of size %zu with nents %u\n",
		  size, sgt->nents);

	return &xen_obj->base;
}
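
/*
 * Map all backing pages of the buffer into the given VMA, with the
 * memory attributes required for memory shared with the backend.
 */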
static int gem_mmap_obj(struct xen_gem_object *xen_obj,
			struct vm_area_struct *vma)
{
	unsigned long addr = vma->vm_start;
	int i;

	/*
	 * clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set
	 * the vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want
	 * to map the whole buffer.
	 */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_pgoff = 0;

	/*
	 * According to Xen on ARM ABI (xen/include/public/arch-arm.h):
	 * all memory which is shared with other entities in the system
	 * (including the hypervisor and other guests) must reside in memory
	 * which is mapped as Normal Inner Write-Back Outer Write-Back
	 * Inner-Shareable.
	 */
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

	/*
	 * A vm_operations_struct.fault handler would normally populate the
	 * pages on first CPU access, but GPUs do not fault pages in this
	 * way. Insert all pages now, so both CPU and GPU accesses work.
	 * FIXME: as all pages are inserted here, no .fault handler needs
	 * to be provided.
	 */
	for (i = 0; i < xen_obj->num_pages; i++) {
		int ret;

		ret = vm_insert_page(vma, addr, xen_obj->pages[i]);
		if (ret < 0) {
			DRM_ERROR("Failed to insert pages into vma: %d\n", ret);
			return ret;
		}

		addr += PAGE_SIZE;
	}

	return 0;
}

int xen_drm_front_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct xen_gem_object *xen_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret < 0)
		return ret;

	gem_obj = vma->vm_private_data;
	xen_obj = to_xen_gem_obj(gem_obj);
	return gem_mmap_obj(xen_obj, vma);
}
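
/* Map the buffer's pages into contiguous kernel virtual address space. */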
void *xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (!xen_obj->pages)
		return NULL;

	/* Please see comment in gem_mmap_obj on mapping and attributes. */
	return vmap(xen_obj->pages, xen_obj->num_pages,
		    VM_MAP, PAGE_KERNEL);
}

void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj,
				    void *vaddr)
{
	vunmap(vaddr);
}

int xen_drm_front_gem_prime_mmap(struct drm_gem_object *gem_obj,
				 struct vm_area_struct *vma)
{
	struct xen_gem_object *xen_obj;
	int ret;

	ret = drm_gem_mmap_obj(gem_obj, gem_obj->size, vma);
	if (ret < 0)
		return ret;

	xen_obj = to_xen_gem_obj(gem_obj);
	return gem_mmap_obj(xen_obj, vma);
}