/*
 * Copyright 2012 Red Hat Inc
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Dave Airlie <airlied@redhat.com>
 */
#include <linux/dma-buf.h>
#include <linux/reservation.h>

#include <drm/drmP.h>

#include "i915_drv.h"
static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
	return to_intel_bo(buf->priv);
}
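/*
 * i915_gem_map_dma_buf() backs dma_buf_map_attachment() for an importer: it
 * pins the object's backing pages, copies the object's scatterlist so that
 * each attachment gets an independent mapping, and DMA-maps that copy for
 * the importing device.
 */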
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		ret = -ENOMEM;
		goto err_unpin_pages;
	}

	ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
	if (ret)
		goto err_free;

	src = obj->mm.pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->mm.pages->nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		ret = -ENOMEM;
		goto err_free_sg;
	}

	return st;

err_free_sg:
	sg_free_table(st);
err_free:
	kfree(st);
err_unpin_pages:
	i915_gem_object_unpin_pages(obj);
err:
	return ERR_PTR(ret);
}
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *sg,
				   enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);

	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);

	i915_gem_object_unpin_pages(obj);
}
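/*
 * The vmap/vunmap hooks hand the importer a kernel virtual address covering
 * the whole object, backed by a cached (I915_MAP_WB) mapping that stays
 * pinned until i915_gem_dmabuf_vunmap() releases it.
 */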
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

	return i915_gem_object_pin_map(obj, I915_MAP_WB);
}
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

	i915_gem_object_unpin_map(obj);
}
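/*
 * The kmap/kunmap hooks map a single page at a time. They only work for
 * objects backed by struct pages, and any cache-domain synchronisation is
 * left to the caller via .begin_cpu_access()/.end_cpu_access().
 */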
static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct page *page;

	if (page_num >= obj->base.size >> PAGE_SHIFT)
		return NULL;

	if (!i915_gem_object_has_struct_page(obj))
		return NULL;

	if (i915_gem_object_pin_pages(obj))
		return NULL;

	/* Synchronisation is left to the caller (via .begin_cpu_access()) */
	page = i915_gem_object_get_page(obj, page_num);
	if (IS_ERR(page))
		goto err_unpin;

	return kmap(page);

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return NULL;
}
static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

	kunmap(virt_to_page(addr));
	i915_gem_object_unpin_pages(obj);
}
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	int ret;

	if (obj->base.size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!obj->base.filp)
		return -ENODEV;

	ret = call_mmap(obj->base.filp, vma);
	if (ret)
		return ret;

	fput(vma->vm_file);
	vma->vm_file = get_file(obj->base.filp);

	return 0;
}
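/*
 * begin_cpu_access/end_cpu_access bracket CPU access by an importer: the
 * object is pulled into the CPU domain before access and pushed back to
 * the GTT domain afterwards.
 */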
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
	int err;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	err = i915_mutex_lock_interruptible(dev);
	if (err)
		goto out;

	err = i915_gem_object_set_to_cpu_domain(obj, write);
	mutex_unlock(&dev->struct_mutex);

out:
	i915_gem_object_unpin_pages(obj);
	return err;
}
static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	int err;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	err = i915_mutex_lock_interruptible(dev);
	if (err)
		goto out;

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	mutex_unlock(&dev->struct_mutex);

out:
	i915_gem_object_unpin_pages(obj);
	return err;
}
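/* dma_buf_ops backing every dma-buf exported by i915_gem_prime_export() */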
static const struct dma_buf_ops i915_dmabuf_ops = {
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = i915_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.map = i915_gem_dmabuf_kmap,
	.unmap = i915_gem_dmabuf_kunmap,
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
	.begin_cpu_access = i915_gem_begin_cpu_access,
	.end_cpu_access = i915_gem_end_cpu_access,
};
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &i915_dmabuf_ops;
	exp_info.size = gem_obj->size;
	exp_info.flags = flags;
	exp_info.priv = gem_obj;
	exp_info.resv = obj->resv;

	if (obj->ops->dmabuf_export) {
		int ret = obj->ops->dmabuf_export(obj);
		if (ret)
			return ERR_PTR(ret);
	}

	return drm_gem_dmabuf_export(dev, &exp_info);
}
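/*
 * For objects imported from a foreign dma-buf, the backing pages come from
 * mapping the attachment rather than from shmem: get_pages maps the
 * attachment's scatterlist and put_pages unmaps it again.
 */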
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	unsigned int sg_page_sizes;

	pages = dma_buf_map_attachment(obj->base.import_attach,
				       DMA_BIDIRECTIONAL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	sg_page_sizes = i915_sg_page_sizes(pages->sgl);

	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);

	return 0;
}
static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	dma_buf_unmap_attachment(obj->base.import_attach, pages,
				 DMA_BIDIRECTIONAL);
}
static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
	.get_pages = i915_gem_object_get_pages_dmabuf,
	.put_pages = i915_gem_object_put_pages_dmabuf,
};
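/*
 * i915_gem_prime_import() short-circuits self-imports (a dma-buf we exported
 * ourselves just takes another reference on the underlying GEM object);
 * anything else is attached and wrapped in a new, dmabuf-backed GEM object.
 */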
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf_to_obj(dma_buf);
		/* is it from our device? */
		if (obj->base.dev == dev) {
			/*
			 * Importing dmabuf exported from our own gem increases
			 * refcount on gem itself instead of f_count of dmabuf.
			 */
			return &i915_gem_object_get(obj)->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	obj = i915_gem_object_alloc(to_i915(dev));
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
	obj->base.import_attach = attach;
	obj->resv = dma_buf->resv;

	/* We use GTT as shorthand for a coherent domain, one that is
	 * neither in the GPU cache nor in the CPU cache, where all
	 * writes are immediately visible in memory. (That's not strictly
	 * true, but it's close! There are internal buffers such as the
	 * write-combined buffer or a delay through the chipset for GTT
	 * writes that do require us to treat GTT as a separate cache domain.)
	 */
	obj->read_domains = I915_GEM_DOMAIN_GTT;
	obj->write_domain = 0;

	return &obj->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_dmabuf.c"
#include "selftests/i915_gem_dmabuf.c"
#endif