/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright 2012 Red Hat Inc
 */

#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/dma-resv.h>

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"

static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
	return to_intel_bo(buf->priv);
}

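/*
 * Exporter map callback: build an independent copy of the object's
 * sg_table and DMA-map it for the importer's device, keeping the
 * backing pages pinned for as long as the mapping exists.
 */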
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		ret = -ENOMEM;
		goto err_unpin_pages;
	}

	ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
	if (ret)
		goto err_free;

	src = obj->mm.pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->mm.pages->nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		ret = -ENOMEM;
		goto err_free_sg;
	}

	return st;

err_free_sg:
	sg_free_table(st);
err_free:
	kfree(st);
err_unpin_pages:
	i915_gem_object_unpin_pages(obj);
err:
	return ERR_PTR(ret);
}

static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *sg,
				   enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);

	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);

	i915_gem_object_unpin_pages(obj);
}

static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

	return i915_gem_object_pin_map(obj, I915_MAP_WB);
}

static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

	i915_gem_object_flush_map(obj);
	i915_gem_object_unpin_map(obj);
}

static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	int ret;

	if (obj->base.size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!obj->base.filp)
		return -ENODEV;

	ret = call_mmap(obj->base.filp, vma);
	if (ret)
		return ret;

	fput(vma->vm_file);
	vma->vm_file = get_file(obj->base.filp);

	return 0;
}

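/*
 * CPU access bracket for importers: begin_cpu_access pulls the object
 * into the CPU domain (flushing GPU caches as required), end_cpu_access
 * pushes it back to the coherent GTT domain. The pages stay pinned for
 * the duration of each call.
 */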
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
	int err;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	err = i915_gem_object_lock_interruptible(obj);
	if (err)
		goto out;

	err = i915_gem_object_set_to_cpu_domain(obj, write);
	i915_gem_object_unlock(obj);

out:
	i915_gem_object_unpin_pages(obj);
	return err;
}

static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	int err;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	err = i915_gem_object_lock_interruptible(obj);
	if (err)
		goto out;

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	i915_gem_object_unlock(obj);

out:
	i915_gem_object_unpin_pages(obj);
	return err;
}

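/* dma-buf callbacks used for buffers exported by i915 */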
static const struct dma_buf_ops i915_dmabuf_ops = {
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = i915_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
	.begin_cpu_access = i915_gem_begin_cpu_access,
	.end_cpu_access = i915_gem_end_cpu_access,
};

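/*
 * Export an i915 GEM object as a dma-buf, sharing its reservation object
 * so that fences are visible to both exporter and importer.
 */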
struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &i915_dmabuf_ops;
	exp_info.size = gem_obj->size;
	exp_info.flags = flags;
	exp_info.priv = gem_obj;
	exp_info.resv = obj->base.resv;

	if (obj->ops->dmabuf_export) {
		int ret = obj->ops->dmabuf_export(obj);
		if (ret)
			return ERR_PTR(ret);
	}

	return drm_gem_dmabuf_export(gem_obj->dev, &exp_info);
}

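/*
 * Backing-store hooks for imported dma-bufs: the pages are obtained by
 * mapping the dma-buf attachment rather than by shmem allocation.
 */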
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	unsigned int sg_page_sizes;

	pages = dma_buf_map_attachment(obj->base.import_attach,
				       DMA_BIDIRECTIONAL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	sg_page_sizes = i915_sg_page_sizes(pages->sgl);

	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);

	return 0;
}

static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	dma_buf_unmap_attachment(obj->base.import_attach, pages,
				 DMA_BIDIRECTIONAL);
}

static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
	.get_pages = i915_gem_object_get_pages_dmabuf,
	.put_pages = i915_gem_object_put_pages_dmabuf,
};

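/*
 * Import a dma-buf as an i915 GEM object. Self-imports short-circuit to
 * the existing object; foreign buffers get a new GEM wrapper whose pages
 * come from the dma-buf attachment.
 */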
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	static struct lock_class_key lock_class;
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf_to_obj(dma_buf);
		/* is it from our device? */
		if (obj->base.dev == dev) {
			/*
			 * Importing dmabuf exported from our own gem increases
			 * refcount on gem itself instead of f_count of dmabuf.
			 */
			return &i915_gem_object_get(obj)->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	obj = i915_gem_object_alloc();
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops, &lock_class);
	obj->base.import_attach = attach;
	obj->base.resv = dma_buf->resv;

	/* We use GTT as shorthand for a coherent domain, one that is
	 * neither in the GPU cache nor in the CPU cache, where all
	 * writes are immediately visible in memory. (That's not strictly
	 * true, but it's close! There are internal buffers such as the
	 * write-combined buffer or a delay through the chipset for GTT
	 * writes that do require us to treat GTT as a separate cache domain.)
	 */
	obj->read_domains = I915_GEM_DOMAIN_GTT;
	obj->write_domain = 0;

	return &obj->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_dmabuf.c"
#include "selftests/i915_gem_dmabuf.c"
#endif