/*
 * Copyright 2012 Red Hat Inc
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Dave Airlie <airlied@redhat.com>
 */
#include <drm/drmP.h>
#include "i915_drv.h"
#include <linux/dma-buf.h>

static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
	return to_intel_bo(buf->priv);
}
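
/*
 * Exporter: duplicate the object's scatterlist so the importer gets an
 * independent mapping, then DMA-map it for the attaching device. The
 * object's backing pages stay pinned until the mapping is destroyed.
 */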
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_mutex_lock_interruptible(obj->base.dev);
	if (ret)
		goto err;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto err_unlock;

	i915_gem_object_pin_pages(obj);

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
	if (ret)
		goto err_free;

	src = obj->pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->pages->nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		ret = -ENOMEM;
		goto err_free_sg;
	}

	mutex_unlock(&obj->base.dev->struct_mutex);
	return st;

err_free_sg:
	sg_free_table(st);
err_free:
	kfree(st);
err_unpin:
	i915_gem_object_unpin_pages(obj);
err_unlock:
	mutex_unlock(&obj->base.dev->struct_mutex);
err:
	return ERR_PTR(ret);
}
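
/* Exporter: undo i915_gem_map_dma_buf() and unpin the backing pages. */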
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *sg,
				   enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);

	mutex_lock(&obj->base.dev->struct_mutex);

	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);

	i915_gem_object_unpin_pages(obj);

	mutex_unlock(&obj->base.dev->struct_mutex);
}
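
/*
 * Exporter: provide a contiguous kernel virtual mapping of the object.
 * The mapping is cached in obj->dma_buf_vmapping and refcounted through
 * obj->vmapping_count, so nested vmap calls reuse the same address.
 */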
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	struct sg_page_iter sg_iter;
	struct page **pages;
	int ret, i;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

	if (obj->dma_buf_vmapping) {
		obj->vmapping_count++;
		goto out_unlock;
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto err;

	i915_gem_object_pin_pages(obj);

	ret = -ENOMEM;

	pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
	if (pages == NULL)
		goto err_unpin;

	i = 0;
	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
		pages[i++] = sg_page_iter_page(&sg_iter);

	obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
	drm_free_large(pages);

	if (!obj->dma_buf_vmapping)
		goto err_unpin;

	obj->vmapping_count = 1;
out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return obj->dma_buf_vmapping;

err_unpin:
	i915_gem_object_unpin_pages(obj);
err:
	mutex_unlock(&dev->struct_mutex);
	return ERR_PTR(ret);
}
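
/* Exporter: drop one vmap reference; unmap and unpin when it hits zero. */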
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;

	mutex_lock(&dev->struct_mutex);
	if (--obj->vmapping_count == 0) {
		vunmap(obj->dma_buf_vmapping);
		obj->dma_buf_vmapping = NULL;

		i915_gem_object_unpin_pages(obj);
	}
	mutex_unlock(&dev->struct_mutex);
}
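
/*
 * Per-page kernel maps and direct mmap of the dma-buf are not supported
 * by this exporter, so the following callbacks are stubs.
 */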
static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}
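
/*
 * Exporter: pull the object into the CPU domain before CPU access so the
 * importer sees coherent data; a bidirectional or to-device direction is
 * treated as a CPU write.
 */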
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	int ret;
	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ret = i915_gem_object_set_to_cpu_domain(obj, write);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
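
/* dma_buf_ops used for every buffer exported by i915. */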
static const struct dma_buf_ops i915_dmabuf_ops = {
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = i915_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.kmap = i915_gem_dmabuf_kmap,
	.kmap_atomic = i915_gem_dmabuf_kmap_atomic,
	.kunmap = i915_gem_dmabuf_kunmap,
	.kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
	.begin_cpu_access = i915_gem_begin_cpu_access,
};
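
/* Wrap a GEM object in a dma-buf for export to other drivers. */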
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &i915_dmabuf_ops;
	exp_info.size = gem_obj->size;
	exp_info.flags = flags;
	exp_info.priv = gem_obj;

	if (obj->ops->dmabuf_export) {
		int ret = obj->ops->dmabuf_export(obj);
		if (ret)
			return ERR_PTR(ret);
	}

	return dma_buf_export(&exp_info);
}
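
/*
 * Importer: the mapped scatterlist of the attached dma-buf doubles as the
 * object's page backing store, so get_pages/put_pages simply map and unmap
 * the attachment.
 */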
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	struct sg_table *sg;

	sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg))
		return PTR_ERR(sg);

	obj->pages = sg;
	return 0;
}

static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	dma_buf_unmap_attachment(obj->base.import_attach,
				 obj->pages, DMA_BIDIRECTIONAL);
}

static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
	.get_pages = i915_gem_object_get_pages_dmabuf,
	.put_pages = i915_gem_object_put_pages_dmabuf,
};
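
/*
 * Import a dma-buf as a GEM object. A buffer that was exported by this
 * very device is unwrapped back to its original GEM object instead of
 * being re-imported through an attachment.
 */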
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf_to_obj(dma_buf);
		/* is it from our device? */
		if (obj->base.dev == dev) {
			/*
			 * Importing dmabuf exported from our own gem increases
			 * refcount on gem itself instead of f_count of dmabuf.
			 */
			drm_gem_object_reference(&obj->base);
			return &obj->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
	obj->base.import_attach = attach;

	return &obj->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}