/*
 * Copyright 2012 Red Hat Inc
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 */
#include <linux/dma-buf.h>
#include <linux/reservation.h>

#include <drm/drmP.h>

#include "i915_drv.h"
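/*
 * dma-buf (PRIME) support for i915 GEM objects: exporting GEM objects as
 * dma-bufs for other devices, and importing foreign dma-bufs as GEM objects.
 */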
static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
        return to_intel_bo(buf->priv);
}
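/*
 * Exporter callback: build an independent copy of the object's sg_table and
 * DMA-map it for the importing device, keeping the backing pages pinned for
 * the lifetime of the mapping.
 */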
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
                                             enum dma_data_direction dir)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
        struct sg_table *st;
        struct scatterlist *src, *dst;
        int ret, i;

        ret = i915_mutex_lock_interruptible(obj->base.dev);
        if (ret)
                goto err;

        ret = i915_gem_object_get_pages(obj);
        if (ret)
                goto err_unlock;

        i915_gem_object_pin_pages(obj);

        /* Copy sg so that we make an independent mapping */
        st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (st == NULL) {
                ret = -ENOMEM;
                goto err_unpin;
        }

        ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
        if (ret)
                goto err_free;

        src = obj->pages->sgl;
        dst = st->sgl;
        for (i = 0; i < obj->pages->nents; i++) {
                sg_set_page(dst, sg_page(src), src->length, 0);
                dst = sg_next(dst);
                src = sg_next(src);
        }

        if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
                ret = -ENOMEM;
                goto err_free_sg;
        }

        mutex_unlock(&obj->base.dev->struct_mutex);
        return st;

err_free_sg:
        sg_free_table(st);
err_free:
        kfree(st);
err_unpin:
        i915_gem_object_unpin_pages(obj);
err_unlock:
        mutex_unlock(&obj->base.dev->struct_mutex);
err:
        return ERR_PTR(ret);
}
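/*
 * Exporter callback: undo i915_gem_map_dma_buf(): unmap and free the copied
 * sg_table, then drop the page pin taken there.
 */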
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
                                   struct sg_table *sg,
                                   enum dma_data_direction dir)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);

        dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
        sg_free_table(sg);
        kfree(sg);

        mutex_lock(&obj->base.dev->struct_mutex);
        i915_gem_object_unpin_pages(obj);
        mutex_unlock(&obj->base.dev->struct_mutex);
}
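/* Map the entire object into the kernel address space for CPU access. */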
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        struct drm_device *dev = obj->base.dev;
        void *addr;
        int ret;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ERR_PTR(ret);

        addr = i915_gem_object_pin_map(obj, I915_MAP_WB);
        mutex_unlock(&dev->struct_mutex);

        return addr;
}
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        struct drm_device *dev = obj->base.dev;

        mutex_lock(&dev->struct_mutex);
        i915_gem_object_unpin_map(obj);
        mutex_unlock(&dev->struct_mutex);
}
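/*
 * Per-page kmap access is not implemented; CPU access to exported buffers
 * goes through vmap or mmap instead.
 */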
static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
        return NULL;
}

static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
}

static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
        return NULL;
}

static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
}
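/*
 * mmap is forwarded to the object's backing shmem file, so userspace maps
 * the same pages the GPU uses.
 */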
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        int ret;

        if (obj->base.size < vma->vm_end - vma->vm_start)
                return -EINVAL;

        if (!obj->base.filp)
                return -ENODEV;

        ret = obj->base.filp->f_op->mmap(obj->base.filp, vma);
        if (ret)
                return ret;

        fput(vma->vm_file);
        vma->vm_file = get_file(obj->base.filp);

        return 0;
}
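/*
 * begin/end_cpu_access bracket CPU access by the importer: move the object
 * into the CPU domain before access and back to the GTT domain afterwards.
 */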
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        struct drm_device *dev = obj->base.dev;
        int ret;
        bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        ret = i915_gem_object_set_to_cpu_domain(obj, write);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}
static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        struct drm_device *dev = obj->base.dev;
        int ret;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        ret = i915_gem_object_set_to_gtt_domain(obj, false);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}
static const struct dma_buf_ops i915_dmabuf_ops = {
        .map_dma_buf = i915_gem_map_dma_buf,
        .unmap_dma_buf = i915_gem_unmap_dma_buf,
        .release = drm_gem_dmabuf_release,
        .kmap = i915_gem_dmabuf_kmap,
        .kmap_atomic = i915_gem_dmabuf_kmap_atomic,
        .kunmap = i915_gem_dmabuf_kunmap,
        .kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
        .mmap = i915_gem_dmabuf_mmap,
        .vmap = i915_gem_dmabuf_vmap,
        .vunmap = i915_gem_dmabuf_vunmap,
        .begin_cpu_access = i915_gem_begin_cpu_access,
        .end_cpu_access = i915_gem_end_cpu_access,
};
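/*
 * Copy the fences for the object's outstanding GPU activity into the
 * dma-buf's reservation object, so that importers can wait for rendering
 * that was queued before the export.
 */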
static void export_fences(struct drm_i915_gem_object *obj,
                          struct dma_buf *dma_buf)
{
        struct reservation_object *resv = dma_buf->resv;
        struct drm_i915_gem_request *req;
        unsigned long active;
        int idx;

        active = __I915_BO_ACTIVE(obj);
        if (!active)
                return;

        /* Serialise with execbuf to prevent concurrent fence-loops */
        mutex_lock(&obj->base.dev->struct_mutex);

        /* Mark the object for future fences before racily adding old fences */
        obj->base.dma_buf = dma_buf;

        ww_mutex_lock(&resv->lock, NULL);

        for_each_active(active, idx) {
                req = i915_gem_active_get(&obj->last_read[idx],
                                          &obj->base.dev->struct_mutex);
                if (!req)
                        continue;

                if (reservation_object_reserve_shared(resv) == 0)
                        reservation_object_add_shared_fence(resv, &req->fence);

                i915_gem_request_put(req);
        }

        req = i915_gem_active_get(&obj->last_write,
                                  &obj->base.dev->struct_mutex);
        if (req) {
                reservation_object_add_excl_fence(resv, &req->fence);
                i915_gem_request_put(req);
        }

        ww_mutex_unlock(&resv->lock);
        mutex_unlock(&obj->base.dev->struct_mutex);
}
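/*
 * Export a GEM object as a dma-buf. The object type's dmabuf_export hook, if
 * any, gets a chance to refuse or prepare for the export first.
 */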
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
                                      struct drm_gem_object *gem_obj, int flags)
{
        struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
        struct dma_buf *dma_buf;

        exp_info.ops = &i915_dmabuf_ops;
        exp_info.size = gem_obj->size;
        exp_info.flags = flags;
        exp_info.priv = gem_obj;

        if (obj->ops->dmabuf_export) {
                int ret = obj->ops->dmabuf_export(obj);
                if (ret)
                        return ERR_PTR(ret);
        }

        dma_buf = drm_gem_dmabuf_export(dev, &exp_info);
        if (IS_ERR(dma_buf))
                return dma_buf;

        export_fences(obj, dma_buf);

        return dma_buf;
}
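/*
 * Backing-storage hooks for imported objects: pages are obtained from the
 * foreign exporter through the dma-buf attachment rather than from shmem.
 */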
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
        struct sg_table *sg;

        sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sg))
                return PTR_ERR(sg);

        obj->pages = sg;

        return 0;
}
static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
{
        dma_buf_unmap_attachment(obj->base.import_attach,
                                 obj->pages, DMA_BIDIRECTIONAL);
}
static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
        .get_pages = i915_gem_object_get_pages_dmabuf,
        .put_pages = i915_gem_object_put_pages_dmabuf,
};
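/*
 * Import a dma-buf as a GEM object. Self-imports are short-circuited back to
 * the original GEM object; foreign buffers get a new GEM wrapper whose pages
 * are fetched through the attachment.
 */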
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
                                             struct dma_buf *dma_buf)
{
        struct dma_buf_attachment *attach;
        struct drm_i915_gem_object *obj;
        int ret;

        /* is this one of our own objects? */
        if (dma_buf->ops == &i915_dmabuf_ops) {
                obj = dma_buf_to_obj(dma_buf);
                /* is it from our device? */
                if (obj->base.dev == dev) {
                        /*
                         * Importing a dmabuf exported from our own gem increases
                         * the refcount on the gem itself instead of the f_count
                         * of the dmabuf.
                         */
                        return &i915_gem_object_get(obj)->base;
                }
        }

        /* need to attach */
        attach = dma_buf_attach(dma_buf, dev->dev);
        if (IS_ERR(attach))
                return ERR_CAST(attach);

        get_dma_buf(dma_buf);

        obj = i915_gem_object_alloc(dev);
        if (obj == NULL) {
                ret = -ENOMEM;
                goto fail_detach;
        }

        drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
        i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
        obj->base.import_attach = attach;

        /* We use GTT as shorthand for a coherent domain, one that is
         * neither in the GPU cache nor in the CPU cache, where all
         * writes are immediately visible in memory. (That's not strictly
         * true, but it's close! There are internal buffers such as the
         * write-combined buffer or a delay through the chipset for GTT
         * writes that do require us to treat GTT as a separate cache domain.)
         */
        obj->base.read_domains = I915_GEM_DOMAIN_GTT;
        obj->base.write_domain = 0;

        return &obj->base;

fail_detach:
        dma_buf_detach(dma_buf, attach);
        dma_buf_put(dma_buf);

        return ERR_PTR(ret);
}