// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 * Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include "xen_drm_front_gem.h"

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>

#include <linux/dma-buf.h>
#include <linux/scatterlist.h>
#include <linux/shmem_fs.h>

#include <xen/balloon.h>

#include "xen_drm_front.h"
#include "xen_drm_front_shbuf.h"

struct xen_gem_object {
	struct drm_gem_object base;

	size_t num_pages;
	struct page **pages;

	/* set for buffers allocated by the backend */
	bool be_alloc;

	/* this is for imported PRIME buffer */
	struct sg_table *sgt_imported;
};
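
/* Map an embedded &struct drm_gem_object back to its containing xen_gem_object. */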
static inline struct xen_gem_object *
to_xen_gem_obj(struct drm_gem_object *gem_obj)
{
	return container_of(gem_obj, struct xen_gem_object, base);
}
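
/*
 * Allocate the pages[] pointer array (not the pages themselves), sized
 * to describe a buffer of buf_size bytes.
 */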
static int gem_alloc_pages_array(struct xen_gem_object *xen_obj,
				 size_t buf_size)
{
	xen_obj->num_pages = DIV_ROUND_UP(buf_size, PAGE_SIZE);
	xen_obj->pages = kvmalloc_array(xen_obj->num_pages,
					sizeof(struct page *), GFP_KERNEL);
	return !xen_obj->pages ? -ENOMEM : 0;
}

static void gem_free_pages_array(struct xen_gem_object *xen_obj)
{
	kvfree(xen_obj->pages);
	xen_obj->pages = NULL;
}
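
/* Allocate and initialize a bare GEM object of the given size. */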
static struct xen_gem_object *gem_create_obj(struct drm_device *dev,
					     size_t size)
{
	struct xen_gem_object *xen_obj;
	int ret;

	xen_obj = kzalloc(sizeof(*xen_obj), GFP_KERNEL);
	if (!xen_obj)
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_object_init(dev, &xen_obj->base, size);
	if (ret < 0) {
		kfree(xen_obj);
		return ERR_PTR(ret);
	}

	return xen_obj;
}
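
/*
 * Create a GEM object backed either by ballooned pages (backend-allocated
 * buffers, later mapped from grant references) or by locally allocated
 * shmem pages that can be granted to the backend.
 */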
static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_gem_object *xen_obj;
	int ret;

	size = round_up(size, PAGE_SIZE);
	xen_obj = gem_create_obj(dev, size);
	if (IS_ERR_OR_NULL(xen_obj))
		return xen_obj;

	if (drm_info->front_info->cfg.be_alloc) {
		/*
		 * backend will allocate space for this buffer, so
		 * only allocate array of pointers to pages
		 */
		ret = gem_alloc_pages_array(xen_obj, size);
		if (ret < 0)
			goto fail;

		/*
		 * allocate ballooned pages which will be used to map
		 * grant references provided by the backend
		 */
		ret = alloc_xenballooned_pages(xen_obj->num_pages,
					       xen_obj->pages);
		if (ret < 0) {
			DRM_ERROR("Cannot allocate %zu ballooned pages: %d\n",
				  xen_obj->num_pages, ret);
			gem_free_pages_array(xen_obj);
			goto fail;
		}

		xen_obj->be_alloc = true;
		return xen_obj;
	}
	/*
	 * need to allocate backing pages now, so we can share those
	 * with the backend
	 */
	xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
	xen_obj->pages = drm_gem_get_pages(&xen_obj->base);
	if (IS_ERR_OR_NULL(xen_obj->pages)) {
		ret = PTR_ERR(xen_obj->pages);
		xen_obj->pages = NULL;
		goto fail;
	}

	return xen_obj;

fail:
	DRM_ERROR("Failed to allocate buffer with size %zu\n", size);
	return ERR_PTR(ret);
}
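
/* Driver entry point for GEM creation: return the embedded base object. */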
struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev,
						size_t size)
{
	struct xen_gem_object *xen_obj;

	xen_obj = gem_create(dev, size);
	if (IS_ERR_OR_NULL(xen_obj))
		return ERR_CAST(xen_obj);

	return &xen_obj->base;
}
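
/*
 * Tear down a GEM object: imported PRIME buffers only drop the page array,
 * ballooned pages go back to the balloon, and locally allocated pages are
 * returned to shmem.
 */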
void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (xen_obj->base.import_attach) {
		drm_prime_gem_destroy(&xen_obj->base, xen_obj->sgt_imported);
		gem_free_pages_array(xen_obj);
	} else {
		if (xen_obj->pages) {
			if (xen_obj->be_alloc) {
				free_xenballooned_pages(xen_obj->num_pages,
							xen_obj->pages);
				gem_free_pages_array(xen_obj);
			} else {
				drm_gem_put_pages(&xen_obj->base,
						  xen_obj->pages, true, false);
			}
		}
	}
	drm_gem_object_release(gem_obj);
	kfree(xen_obj);
}
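
/* Return the backing pages of the object, if any. */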
struct page **xen_drm_front_gem_get_pages(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	return xen_obj->pages;
}
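
/* Build a scatter-gather table over the backing pages for PRIME export. */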
struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (!xen_obj->pages)
		return NULL;

	return drm_prime_pages_to_sg(xen_obj->pages, xen_obj->num_pages);
}
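
/*
 * Import a PRIME buffer: collect its pages from the scatter-gather table
 * and tell the backend about the new display buffer.
 */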
struct drm_gem_object *
xen_drm_front_gem_import_sg_table(struct drm_device *dev,
				  struct dma_buf_attachment *attach,
				  struct sg_table *sgt)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_gem_object *xen_obj;
	size_t size;
	int ret;

	size = attach->dmabuf->size;
	xen_obj = gem_create_obj(dev, size);
	if (IS_ERR_OR_NULL(xen_obj))
		return ERR_CAST(xen_obj);

	ret = gem_alloc_pages_array(xen_obj, size);
	if (ret < 0)
		return ERR_PTR(ret);

	xen_obj->sgt_imported = sgt;

	ret = drm_prime_sg_to_page_addr_arrays(sgt, xen_obj->pages,
					       NULL, xen_obj->num_pages);
	if (ret < 0)
		return ERR_PTR(ret);

	ret = xen_drm_front_dbuf_create(drm_info->front_info,
					xen_drm_front_dbuf_to_cookie(&xen_obj->base),
					0, 0, 0, size, xen_obj->pages);
	if (ret < 0)
		return ERR_PTR(ret);

	DRM_DEBUG("Imported buffer of size %zu with nents %u\n",
		  size, sgt->nents);

	return &xen_obj->base;
}
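
/*
 * Map the whole buffer into a userspace VMA by inserting every backing
 * page up front instead of relying on a .fault handler.
 */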
static int gem_mmap_obj(struct xen_gem_object *xen_obj,
			struct vm_area_struct *vma)
{
	unsigned long addr = vma->vm_start;
	int i;

	/*
	 * clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0, as we want to
	 * map the whole buffer.
	 */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_pgoff = 0;
	vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/*
	 * The vm_operations_struct.fault handler would normally be called on
	 * CPU access to the VMA. For GPU-only buffers the CPU never touches
	 * the memory, so insert all pages now to keep both CPU and GPU happy.
	 * FIXME: since all pages are inserted here, no .fault handler can
	 * ever be called, so none is provided.
	 */
	for (i = 0; i < xen_obj->num_pages; i++) {
		int ret;

		ret = vm_insert_page(vma, addr, xen_obj->pages[i]);
		if (ret < 0) {
			DRM_ERROR("Failed to insert pages into vma: %d\n", ret);
			return ret;
		}

		addr += PAGE_SIZE;
	}
	return 0;
}
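
/* mmap entry point for the DRM file: resolve the GEM object, then map it. */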
int xen_drm_front_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct xen_gem_object *xen_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret < 0)
		return ret;

	gem_obj = vma->vm_private_data;
	xen_obj = to_xen_gem_obj(gem_obj);
	return gem_mmap_obj(xen_obj, vma);
}
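
/* Create a write-combined kernel mapping of the buffer for PRIME vmap. */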
void *xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (!xen_obj->pages)
		return NULL;

	return vmap(xen_obj->pages, xen_obj->num_pages,
		    VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}

void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj,
				    void *vaddr)
{
	vunmap(vaddr);
}
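
/* mmap entry point for PRIME-exported buffers: set up the VMA, then map. */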
int xen_drm_front_gem_prime_mmap(struct drm_gem_object *gem_obj,
				 struct vm_area_struct *vma)
{
	struct xen_gem_object *xen_obj;
	int ret;

	ret = drm_gem_mmap_obj(gem_obj, gem_obj->size, vma);
	if (ret < 0)
		return ret;

	xen_obj = to_xen_gem_obj(gem_obj);
	return gem_mmap_obj(xen_obj, vma);
}
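
/*
 * Illustrative sketch only (not part of this file): how these helpers are
 * typically wired into the driver. The callback and fops field names below
 * match the DRM core of this kernel generation, but the actual hookup lives
 * in xen_drm_front.c, so treat this as an assumption-labelled example rather
 * than the driver's literal initialization:
 *
 *	static const struct file_operations xen_drm_dev_fops = {
 *		.owner   = THIS_MODULE,
 *		.open    = drm_open,
 *		.release = drm_release,
 *		.mmap    = xen_drm_front_gem_mmap,
 *	};
 *
 *	static struct drm_driver xen_drm_driver = {
 *		.gem_free_object_unlocked  = xen_drm_front_gem_free_object_unlocked,
 *		.gem_prime_get_sg_table    = xen_drm_front_gem_get_sg_table,
 *		.gem_prime_import_sg_table = xen_drm_front_gem_import_sg_table,
 *		.gem_prime_vmap            = xen_drm_front_gem_prime_vmap,
 *		.gem_prime_vunmap          = xen_drm_front_gem_prime_vunmap,
 *		.gem_prime_mmap            = xen_drm_front_gem_prime_mmap,
 *		.fops                      = &xen_drm_dev_fops,
 *	};
 */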