/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright 2012 Red Hat Inc
 */
#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/dma-resv.h>

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
	return to_intel_bo(buf->priv);
}
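
/*
 * dma-buf export path: build an independent copy of the object's sg_table
 * and map it for DMA on the attaching device. The backing pages stay
 * pinned until i915_gem_unmap_dma_buf() releases them.
 */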
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		ret = -ENOMEM;
		goto err_unpin_pages;
	}

	ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
	if (ret)
		goto err_free;

	src = obj->mm.pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->mm.pages->nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	ret = dma_map_sgtable(attachment->dev, st, dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (ret)
		goto err_free_sg;

	return st;

err_free_sg:
	sg_free_table(st);
err_free:
	kfree(st);
err_unpin_pages:
	i915_gem_object_unpin_pages(obj);
err:
	return ERR_PTR(ret);
}
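
/* Undo i915_gem_map_dma_buf(): unmap, free the sg_table copy, unpin. */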
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *sg,
				   enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);

	dma_unmap_sgtable(attachment->dev, sg, dir, DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(sg);
	kfree(sg);

	i915_gem_object_unpin_pages(obj);
}
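
/*
 * CPU vmap of the exported object, using a write-back kernel mapping of
 * the pinned pages.
 */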
static int i915_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	void *vaddr;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	dma_buf_map_set_vaddr(map, vaddr);

	return 0;
}
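
/* Flush any CPU writes through the mapping before dropping the vmap pin. */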
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

	i915_gem_object_flush_map(obj);
	i915_gem_object_unpin_map(obj);
}
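
/*
 * mmap of the exported buffer is delegated to the shmem file backing the
 * object, so the importer sees exactly the same pages as the exporter.
 */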
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	int ret;

	if (obj->base.size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!obj->base.filp)
		return -ENODEV;

	ret = call_mmap(obj->base.filp, vma);
	if (ret)
		return ret;

	vma_set_file(vma, obj->base.filp);

	return 0;
}
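
/*
 * Prepare for CPU access by the importer: move the object to the CPU
 * domain, with "write" inferred from the access direction.
 */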
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
	int err;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	err = i915_gem_object_lock_interruptible(obj, NULL);
	if (err)
		goto out;

	err = i915_gem_object_set_to_cpu_domain(obj, write);
	i915_gem_object_unlock(obj);

out:
	i915_gem_object_unpin_pages(obj);
	return err;
}
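
/*
 * CPU access is finished: return the object to the coherent GTT domain,
 * flushing any cached CPU writes back before further GPU use.
 */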
static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	int err;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	err = i915_gem_object_lock_interruptible(obj, NULL);
	if (err)
		goto out;

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	i915_gem_object_unlock(obj);

out:
	i915_gem_object_unpin_pages(obj);
	return err;
}
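
/* dma-buf operations used for every buffer exported by i915. */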
static const struct dma_buf_ops i915_dmabuf_ops = {
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = i915_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
	.begin_cpu_access = i915_gem_begin_cpu_access,
	.end_cpu_access = i915_gem_end_cpu_access,
};
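
/*
 * Export a gem object as a dma-buf, sharing the object's reservation
 * object so implicit fencing keeps working across the export boundary.
 */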
struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &i915_dmabuf_ops;
	exp_info.size = gem_obj->size;
	exp_info.flags = flags;
	exp_info.priv = gem_obj;
	exp_info.resv = obj->base.resv;

	if (obj->ops->dmabuf_export) {
		int ret = obj->ops->dmabuf_export(obj);
		if (ret)
			return ERR_PTR(ret);
	}

	return drm_gem_dmabuf_export(gem_obj->dev, &exp_info);
}
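
/*
 * get_pages backend for imported objects: the pages come from the
 * exporter via the dma-buf attachment rather than from shmem.
 */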
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	unsigned int sg_page_sizes;

	pages = dma_buf_map_attachment(obj->base.import_attach,
				       DMA_BIDIRECTIONAL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	sg_page_sizes = i915_sg_page_sizes(pages->sgl);

	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);

	return 0;
}
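
/* Release the pages obtained in i915_gem_object_get_pages_dmabuf(). */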
static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	dma_buf_unmap_attachment(obj->base.import_attach, pages,
				 DMA_BIDIRECTIONAL);
}
static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
	.name = "i915_gem_object_dmabuf",
	.get_pages = i915_gem_object_get_pages_dmabuf,
	.put_pages = i915_gem_object_put_pages_dmabuf,
};
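
/*
 * Import a dma-buf as a gem object. A buffer that was exported by this
 * same device is short-circuited back to the original object instead of
 * being wrapped in a new one.
 */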
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	static struct lock_class_key lock_class;
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf_to_obj(dma_buf);
		/* is it from our device? */
		if (obj->base.dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead
			 * of the f_count of the dmabuf.
			 */
			return &i915_gem_object_get(obj)->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	obj = i915_gem_object_alloc();
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops, &lock_class);
	obj->base.import_attach = attach;
	obj->base.resv = dma_buf->resv;

	/* We use GTT as shorthand for a coherent domain, one that is
	 * neither in the GPU cache nor in the CPU cache, where all
	 * writes are immediately visible in memory. (That's not strictly
	 * true, but it's close! There are internal buffers such as the
	 * write-combined buffer or a delay through the chipset for GTT
	 * writes that do require us to treat GTT as a separate cache domain.)
	 */
	obj->read_domains = I915_GEM_DOMAIN_GTT;
	obj->write_domain = 0;

	return &obj->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_dmabuf.c"
#include "selftests/i915_gem_dmabuf.c"
#endif