/*
 * Copyright 2012 Red Hat Inc
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Dave Airlie <airlied@redhat.com>
 */
#include <drm/drmP.h>
#include "i915_drv.h"
#include <linux/dma-buf.h>
static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
	return to_intel_bo(buf->priv);
}
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_mutex_lock_interruptible(obj->base.dev);
	if (ret)
		goto err;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto err_unlock;

	i915_gem_object_pin_pages(obj);

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
	if (ret)
		goto err_free;

	src = obj->pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->pages->nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		ret = -ENOMEM;
		goto err_free_sg;
	}

	mutex_unlock(&obj->base.dev->struct_mutex);
	return st;

err_free_sg:
	sg_free_table(st);
err_free:
	kfree(st);
err_unpin:
	i915_gem_object_unpin_pages(obj);
err_unlock:
	mutex_unlock(&obj->base.dev->struct_mutex);
err:
	return ERR_PTR(ret);
}
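/*
 * Importer-side sketch (assumed, not part of this file): the dma-buf core,
 * not the importer, invokes i915_gem_map_dma_buf() above, by way of
 * dma_buf_map_attachment(). "importer_dev" below is a placeholder for the
 * importing driver's struct device.
 *
 *	attach = dma_buf_attach(buf, importer_dev);
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	...program the device with the sg list...
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(buf, attach);
 */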
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *sg,
				   enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);

	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);

	mutex_lock(&obj->base.dev->struct_mutex);
	i915_gem_object_unpin_pages(obj);
	mutex_unlock(&obj->base.dev->struct_mutex);
}
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	void *addr;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

	addr = i915_gem_object_pin_map(obj);
	mutex_unlock(&dev->struct_mutex);

	return addr;
}
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;

	mutex_lock(&dev->struct_mutex);
	i915_gem_object_unpin_map(obj);
	mutex_unlock(&dev->struct_mutex);
}
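/*
 * Kernel-side sketch (assumed, not part of this file): other kernel users
 * reach the vmap/vunmap callbacks above through the dma-buf core, which
 * provides a whole-buffer kernel mapping:
 *
 *	void *vaddr = dma_buf_vmap(buf);
 *	if (vaddr) {
 *		...access the buffer through vaddr...
 *		dma_buf_vunmap(buf, vaddr);
 *	}
 */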
/*
 * Per-page CPU access through kmap is not implemented for i915 dma-bufs;
 * the map hooks simply return NULL and the unmap hooks are no-ops.
 */
static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	int ret;

	if (obj->base.size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!obj->base.filp)
		return -ENODEV;

	/* Delegate to the shmem backing file's mmap implementation. */
	ret = obj->base.filp->f_op->mmap(obj->base.filp, vma);
	if (ret)
		return ret;

	/* Swap the vma's file reference over to the backing file. */
	fput(vma->vm_file);
	vma->vm_file = get_file(obj->base.filp);

	return 0;
}
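/*
 * Userspace-side sketch (assumed, not part of this file): because this
 * exporter implements .mmap, an importing process can map the buffer
 * directly through the dma-buf fd, which lands in i915_gem_dmabuf_mmap():
 *
 *	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, dmabuf_fd, 0);
 */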
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ret = i915_gem_object_set_to_cpu_domain(obj, write);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ret = i915_gem_object_set_to_gtt_domain(obj, false);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
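/*
 * Kernel-side sketch (assumed, not part of this file): CPU access by
 * another kernel user is bracketed by the dma-buf core, which dispatches
 * to the begin/end callbacks above:
 *
 *	if (!dma_buf_begin_cpu_access(buf, DMA_FROM_DEVICE)) {
 *		...CPU reads of the buffer...
 *		dma_buf_end_cpu_access(buf, DMA_FROM_DEVICE);
 *	}
 */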
static const struct dma_buf_ops i915_dmabuf_ops = {
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = i915_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.kmap = i915_gem_dmabuf_kmap,
	.kmap_atomic = i915_gem_dmabuf_kmap_atomic,
	.kunmap = i915_gem_dmabuf_kunmap,
	.kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
	.begin_cpu_access = i915_gem_begin_cpu_access,
	.end_cpu_access = i915_gem_end_cpu_access,
};
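/*
 * Note that .release is the shared DRM helper drm_gem_dmabuf_release(),
 * which drops the GEM object reference taken at export time once the last
 * reference to the dma-buf file itself goes away.
 */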
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &i915_dmabuf_ops;
	exp_info.size = gem_obj->size;
	exp_info.flags = flags;
	exp_info.priv = gem_obj;

	if (obj->ops->dmabuf_export) {
		int ret = obj->ops->dmabuf_export(obj);
		if (ret)
			return ERR_PTR(ret);
	}

	return dma_buf_export(&exp_info);
}
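/*
 * Surrounding flow for reference (assumed, defined outside this file):
 * userspace reaches i915_gem_prime_export() via the PRIME ioctl
 * DRM_IOCTL_PRIME_HANDLE_TO_FD, which the DRM core routes through
 * drm_gem_prime_handle_to_fd() to the driver's ->gem_prime_export hook.
 */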
/*
 * Backing-store hooks for imported objects: pages come from the dma-buf
 * attachment rather than from an shmem filp.
 */
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	struct sg_table *sg;

	sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg))
		return PTR_ERR(sg);

	obj->pages = sg;
	return 0;
}

static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	dma_buf_unmap_attachment(obj->base.import_attach,
				 obj->pages, DMA_BIDIRECTIONAL);
}
static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
	.get_pages = i915_gem_object_get_pages_dmabuf,
	.put_pages = i915_gem_object_put_pages_dmabuf,
};
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf_to_obj(dma_buf);
		/* is it from our device? */
		if (obj->base.dev == dev) {
			/*
			 * Importing dmabuf exported from our own gem increases
			 * refcount on gem itself instead of f_count of dmabuf.
			 */
			return &i915_gem_object_get(obj)->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
	obj->base.import_attach = attach;

	/* We use GTT as shorthand for a coherent domain, one that is
	 * neither in the GPU cache nor in the CPU cache, where all
	 * writes are immediately visible in memory. (That's not strictly
	 * true, but it's close! There are internal buffers such as the
	 * write-combined buffer or a delay through the chipset for GTT
	 * writes that do require us to treat GTT as a separate cache domain.)
	 */
	obj->base.read_domains = I915_GEM_DOMAIN_GTT;
	obj->base.write_domain = 0;

	return &obj->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}