drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright 2012 Red Hat Inc
 */

#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/dma-resv.h>

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"

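/* Recover the wrapped i915 GEM object from an i915-exported dma-buf. */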
static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
	return to_intel_bo(buf->priv);
}

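/*
 * Exporter map callback: pin the object's backing pages, copy its
 * scatterlist so the importer gets an independent mapping, and map
 * that copy for DMA on the importer's device.
 */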
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		ret = -ENOMEM;
		goto err_unpin_pages;
	}

	ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
	if (ret)
		goto err_free;

	src = obj->mm.pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->mm.pages->nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	ret = dma_map_sgtable(attachment->dev, st, dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (ret)
		goto err_free_sg;

	return st;

err_free_sg:
	sg_free_table(st);
err_free:
	kfree(st);
err_unpin_pages:
	i915_gem_object_unpin_pages(obj);
err:
	return ERR_PTR(ret);
}

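/* Exporter unmap callback: tear down i915_gem_map_dma_buf() in reverse. */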
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *sg,
				   enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);

	dma_unmap_sgtable(attachment->dev, sg, dir, DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(sg);
	kfree(sg);

	i915_gem_object_unpin_pages(obj);
}

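/* Give the importer a kernel virtual address, backed by a write-back mapping. */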
static int i915_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	void *vaddr;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	dma_buf_map_set_vaddr(map, vaddr);

	return 0;
}

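/* Flush CPU writes out of the kernel mapping before dropping it. */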
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

	i915_gem_object_flush_map(obj);
	i915_gem_object_unpin_map(obj);
}

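/*
 * mmap is only supported for objects backed by a shmem file: delegate
 * to that file's mmap and then point the vma at it.
 */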
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	int ret;

	if (obj->base.size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!obj->base.filp)
		return -ENODEV;

	ret = call_mmap(obj->base.filp, vma);
	if (ret)
		return ret;

	vma_set_file(vma, obj->base.filp);

	return 0;
}

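/*
 * Importer is about to touch the buffer with the CPU: move the object
 * to the CPU domain, flushing GPU caches as needed. The access counts
 * as a write for DMA_BIDIRECTIONAL and DMA_TO_DEVICE transfers.
 */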
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
	int err;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	err = i915_gem_object_lock_interruptible(obj, NULL);
	if (err)
		goto out;

	err = i915_gem_object_set_to_cpu_domain(obj, write);
	i915_gem_object_unlock(obj);

out:
	i915_gem_object_unpin_pages(obj);
	return err;
}

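/*
 * CPU access has finished: move the object back to the coherent GTT
 * domain so the CPU writes are flushed and visible to the GPU.
 */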
static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	int err;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	err = i915_gem_object_lock_interruptible(obj, NULL);
	if (err)
		goto out;

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	i915_gem_object_unlock(obj);

out:
	i915_gem_object_unpin_pages(obj);
	return err;
}

static const struct dma_buf_ops i915_dmabuf_ops =  {
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = i915_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
	.begin_cpu_access = i915_gem_begin_cpu_access,
	.end_cpu_access = i915_gem_end_cpu_access,
};

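/*
 * Export a GEM object as a dma-buf, letting the object type refuse or
 * prepare for the export via its optional dmabuf_export hook.
 */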
struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &i915_dmabuf_ops;
	exp_info.size = gem_obj->size;
	exp_info.flags = flags;
	exp_info.priv = gem_obj;
	exp_info.resv = obj->base.resv;

	if (obj->ops->dmabuf_export) {
		int ret = obj->ops->dmabuf_export(obj);
		if (ret)
			return ERR_PTR(ret);
	}

	return drm_gem_dmabuf_export(gem_obj->dev, &exp_info);
}

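/*
 * Importer-side get_pages: ask the exporter for a DMA mapping of the
 * attached dma-buf and adopt its scatterlist as the object's backing pages.
 */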
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	unsigned int sg_page_sizes;

	pages = dma_buf_map_attachment(obj->base.import_attach,
				       DMA_BIDIRECTIONAL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	sg_page_sizes = i915_sg_page_sizes(pages->sgl);

	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);

	return 0;
}

static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	dma_buf_unmap_attachment(obj->base.import_attach, pages,
				 DMA_BIDIRECTIONAL);
}

static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
	.name = "i915_gem_object_dmabuf",
	.get_pages = i915_gem_object_get_pages_dmabuf,
	.put_pages = i915_gem_object_put_pages_dmabuf,
};

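/*
 * Import a dma-buf as a GEM object. A self-import of a buffer we
 * exported ourselves short-circuits to a reference on the original
 * object; anything else is wrapped in a new dmabuf-backed object.
 */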
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	static struct lock_class_key lock_class;
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf_to_obj(dma_buf);
		/* is it from our device? */
		if (obj->base.dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem increases
			 * the refcount on the gem itself instead of the f_count
			 * of the dmabuf.
			 */
			return &i915_gem_object_get(obj)->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	obj = i915_gem_object_alloc();
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops, &lock_class);
	obj->base.import_attach = attach;
	obj->base.resv = dma_buf->resv;

	/*
	 * We use GTT as shorthand for a coherent domain, one that is
	 * neither in the GPU cache nor in the CPU cache, where all
	 * writes are immediately visible in memory. (That's not strictly
	 * true, but it's close! There are internal buffers such as the
	 * write-combined buffer or a delay through the chipset for GTT
	 * writes that do require us to treat GTT as a separate cache domain.)
	 */
	obj->read_domains = I915_GEM_DOMAIN_GTT;
	obj->write_domain = 0;

	return &obj->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_dmabuf.c"
#include "selftests/i915_gem_dmabuf.c"
#endif