/*
 * Copyright © 2012 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 */
#include <linux/export.h>
#include <linux/dma-buf.h>
#include <linux/rbtree.h>
#include <drm/drm_prime.h>
#include <drm/drm_gem.h>
#include <drm/drmP.h>

#include "drm_internal.h"
/*
 * DMA-BUF/GEM Object references and lifetime overview:
 *
 * On the export the dma_buf holds a reference to the exporting GEM
 * object. It takes this reference in handle_to_fd_ioctl, when it
 * first calls .prime_export and stores the exporting GEM object in
 * the dma_buf priv. This reference needs to be released when the
 * final reference to the &dma_buf itself is dropped and its
 * &dma_buf_ops.release function is called. For GEM-based drivers,
 * the dma_buf should be exported using drm_gem_dmabuf_export() and
 * then released by drm_gem_dmabuf_release().
 *
 * On the import the importing GEM object holds a reference to the
 * dma_buf (which in turn holds a ref to the exporting GEM object).
 * It takes that reference in the fd_to_handle ioctl: it calls
 * dma_buf_get, creates an attachment to it and stores the attachment
 * in the GEM object. When this attachment is destroyed, i.e. when
 * the imported object is destroyed, we remove the attachment and
 * drop the reference to the dma_buf.
 *
 * When all the references to the &dma_buf are dropped, i.e. when
 * userspace has closed both handles to the imported GEM object (through the
 * FD_TO_HANDLE IOCTL) and closed the file descriptor of the exported
 * (through the HANDLE_TO_FD IOCTL) dma_buf, and all kernel-internal references
 * are also gone, then the dma_buf gets destroyed. This can also happen as a
 * part of the clean up procedure in the drm_release() function if userspace
 * fails to properly clean up. Note that both the kernel and userspace (by
 * keeping the PRIME file descriptors open) can hold references onto a
 * &dma_buf.
 *
 * Thus the chain of references always flows in one direction
 * (avoiding loops): importing_gem -> dmabuf -> exporting_gem.
 *
 * Self-importing: if userspace is using PRIME as a replacement for flink
 * then it will get a fd->handle request for a GEM object that it created.
 * Drivers should detect this situation and return back the gem object
 * from the dma-buf private. Prime will do this automatically for drivers that
 * use the drm_gem_prime_{import,export} helpers.
 */
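/*
 * For illustration, the userspace side of the flow described above, as a
 * minimal sketch using the libdrm wrappers around the two PRIME ioctls
 * (error handling omitted; the file descriptor variables are assumptions):
 *
 *	int prime_fd = -1;
 *	uint32_t handle;
 *
 *	// In the exporting process: GEM handle -> dma-buf fd.
 *	drmPrimeHandleToFD(exporter_drm_fd, gem_handle, DRM_CLOEXEC, &prime_fd);
 *
 *	// In the importing process (after receiving prime_fd, e.g. over a
 *	// unix socket): dma-buf fd -> GEM handle on the importing device.
 *	drmPrimeFDToHandle(importer_drm_fd, prime_fd, &handle);
 */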
struct drm_prime_member {
	struct dma_buf *dma_buf;
	uint32_t handle;

	struct rb_node dmabuf_rb;
	struct rb_node handle_rb;
};

struct drm_prime_attachment {
	struct sg_table *sgt;
	enum dma_data_direction dir;
};
static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
				    struct dma_buf *dma_buf, uint32_t handle)
{
	struct drm_prime_member *member;
	struct rb_node **p, *rb;

	member = kmalloc(sizeof(*member), GFP_KERNEL);
	if (!member)
		return -ENOMEM;

	get_dma_buf(dma_buf);
	member->dma_buf = dma_buf;
	member->handle = handle;

	/* insert into the dmabuf-indexed rb-tree */
	rb = NULL;
	p = &prime_fpriv->dmabufs.rb_node;
	while (*p) {
		struct drm_prime_member *pos;

		rb = *p;
		pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (dma_buf > pos->dma_buf)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&member->dmabuf_rb, rb, p);
	rb_insert_color(&member->dmabuf_rb, &prime_fpriv->dmabufs);

	/* and into the handle-indexed rb-tree */
	rb = NULL;
	p = &prime_fpriv->handles.rb_node;
	while (*p) {
		struct drm_prime_member *pos;

		rb = *p;
		pos = rb_entry(rb, struct drm_prime_member, handle_rb);
		if (handle > pos->handle)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&member->handle_rb, rb, p);
	rb_insert_color(&member->handle_rb, &prime_fpriv->handles);

	return 0;
}
static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
						      uint32_t handle)
{
	struct rb_node *rb;

	rb = prime_fpriv->handles.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, handle_rb);
		if (member->handle == handle)
			return member->dma_buf;
		else if (member->handle < handle)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}
static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
				       struct dma_buf *dma_buf,
				       uint32_t *handle)
{
	struct rb_node *rb;

	rb = prime_fpriv->dmabufs.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (member->dma_buf == dma_buf) {
			*handle = member->handle;
			return 0;
		} else if (member->dma_buf < dma_buf) {
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	}

	return -ENOENT;
}
static int drm_gem_map_attach(struct dma_buf *dma_buf,
			      struct device *target_dev,
			      struct dma_buf_attachment *attach)
{
	struct drm_prime_attachment *prime_attach;
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
	if (!prime_attach)
		return -ENOMEM;

	prime_attach->dir = DMA_NONE;
	attach->priv = prime_attach;

	if (!dev->driver->gem_prime_pin)
		return 0;

	return dev->driver->gem_prime_pin(obj);
}
static void drm_gem_map_detach(struct dma_buf *dma_buf,
			       struct dma_buf_attachment *attach)
{
	struct drm_prime_attachment *prime_attach = attach->priv;
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;
	struct sg_table *sgt;

	if (dev->driver->gem_prime_unpin)
		dev->driver->gem_prime_unpin(obj);

	if (!prime_attach)
		return;

	sgt = prime_attach->sgt;
	if (sgt) {
		if (prime_attach->dir != DMA_NONE)
			dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
				     prime_attach->dir);
		sg_free_table(sgt);
	}

	kfree(sgt);
	kfree(prime_attach);
	attach->priv = NULL;
}
void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
					struct dma_buf *dma_buf)
{
	struct rb_node *rb;

	rb = prime_fpriv->dmabufs.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (member->dma_buf == dma_buf) {
			rb_erase(&member->handle_rb, &prime_fpriv->handles);
			rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);

			dma_buf_put(dma_buf);
			kfree(member);
			return;
		} else if (member->dma_buf < dma_buf) {
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	}
}
static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
					    enum dma_data_direction dir)
{
	struct drm_prime_attachment *prime_attach = attach->priv;
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct sg_table *sgt;

	if (WARN_ON(dir == DMA_NONE || !prime_attach))
		return ERR_PTR(-EINVAL);

	/* return the cached mapping when possible */
	if (prime_attach->dir == dir)
		return prime_attach->sgt;

	/*
	 * two mappings with different directions for the same attachment are
	 * not allowed
	 */
	if (WARN_ON(prime_attach->dir != DMA_NONE))
		return ERR_PTR(-EBUSY);

	sgt = obj->dev->driver->gem_prime_get_sg_table(obj);

	if (!IS_ERR(sgt)) {
		if (!dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir)) {
			sg_free_table(sgt);
			kfree(sgt);
			sgt = ERR_PTR(-ENOMEM);
		} else {
			prime_attach->sgt = sgt;
			prime_attach->dir = dir;
		}
	}

	return sgt;
}
static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
				  struct sg_table *sgt,
				  enum dma_data_direction dir)
{
	/* nothing to be done here */
}
/**
 * drm_gem_dmabuf_export - dma_buf export implementation for GEM
 * @dev: parent device for the exported dmabuf
 * @exp_info: the export information used by dma_buf_export()
 *
 * This wraps dma_buf_export() for use by generic GEM drivers that are using
 * drm_gem_dmabuf_release(). In addition to calling dma_buf_export(), we take
 * a reference to the &drm_device and the exported &drm_gem_object (stored in
 * &dma_buf_export_info.priv) which is released by drm_gem_dmabuf_release().
 *
 * Returns the new dmabuf.
 */
struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
				      struct dma_buf_export_info *exp_info)
{
	struct dma_buf *dma_buf;

	dma_buf = dma_buf_export(exp_info);
	if (IS_ERR(dma_buf))
		return dma_buf;

	drm_dev_get(dev);
	drm_gem_object_get(exp_info->priv);

	return dma_buf;
}
EXPORT_SYMBOL(drm_gem_dmabuf_export);
/**
 * drm_gem_dmabuf_release - dma_buf release implementation for GEM
 * @dma_buf: buffer to be released
 *
 * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
 * must use this in their dma_buf ops structure as the release callback.
 * drm_gem_dmabuf_release() should be used in conjunction with
 * drm_gem_dmabuf_export().
 */
void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	/* drop the reference the export fd holds */
	drm_gem_object_put_unlocked(obj);

	drm_dev_put(dev);
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);
static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	return dev->driver->gem_prime_vmap(obj);
}

static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	dev->driver->gem_prime_vunmap(obj, vaddr);
}
/* kmap is not supported by the PRIME helpers; these are deliberate no-op stubs */
static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
					unsigned long page_num)
{
	return NULL;
}

static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
					 unsigned long page_num, void *addr)
{

}

static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
				 unsigned long page_num)
{
	return NULL;
}

static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
				  unsigned long page_num, void *addr)
{

}
static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
			       struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	if (!dev->driver->gem_prime_mmap)
		return -ENOSYS;

	return dev->driver->gem_prime_mmap(obj, vma);
}
static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
	.attach = drm_gem_map_attach,
	.detach = drm_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.map = drm_gem_dmabuf_kmap,
	.map_atomic = drm_gem_dmabuf_kmap_atomic,
	.unmap = drm_gem_dmabuf_kunmap,
	.unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};
/**
 * DOC: PRIME Helpers
 *
 * Drivers can implement @gem_prime_export and @gem_prime_import in terms of
 * simpler APIs by using the helper functions @drm_gem_prime_export and
 * @drm_gem_prime_import. These functions implement dma-buf support in terms of
 * six lower-level driver callbacks:
 *
 * Export callbacks:
 *
 * * @gem_prime_pin (optional): prepare a GEM object for exporting
 * * @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
 * * @gem_prime_vmap: vmap a buffer exported by your driver
 * * @gem_prime_vunmap: vunmap a buffer exported by your driver
 * * @gem_prime_mmap (optional): mmap a buffer exported by your driver
 *
 * Import callback:
 *
 * * @gem_prime_import_sg_table (import): produce a GEM object from another
 *   driver's scatter/gather table
 *
 * A sketch of how a driver wires these up follows below.
 */
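/*
 * A minimal sketch of the resulting wiring in a driver's &drm_driver,
 * assuming the driver implements the lower-level callbacks listed above
 * (the foo_ names are hypothetical driver functions):
 *
 *	static struct drm_driver foo_driver = {
 *		.driver_features	   = DRIVER_GEM | DRIVER_PRIME,
 *		.prime_handle_to_fd	   = drm_gem_prime_handle_to_fd,
 *		.prime_fd_to_handle	   = drm_gem_prime_fd_to_handle,
 *		.gem_prime_export	   = drm_gem_prime_export,
 *		.gem_prime_import	   = drm_gem_prime_import,
 *		.gem_prime_pin		   = foo_gem_prime_pin,
 *		.gem_prime_get_sg_table	   = foo_gem_prime_get_sg_table,
 *		.gem_prime_import_sg_table = foo_gem_prime_import_sg_table,
 *		.gem_prime_vmap		   = foo_gem_prime_vmap,
 *		.gem_prime_vunmap	   = foo_gem_prime_vunmap,
 *		.gem_prime_mmap		   = foo_gem_prime_mmap,
 *	};
 */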
/**
 * drm_gem_prime_export - helper library implementation of the export callback
 * @dev: drm_device to export from
 * @obj: GEM object to export
 * @flags: flags like DRM_CLOEXEC and DRM_RDWR
 *
 * This is the implementation of the gem_prime_export function for GEM drivers
 * using the PRIME helpers.
 */
struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
				     struct drm_gem_object *obj,
				     int flags)
{
	struct dma_buf_export_info exp_info = {
		.exp_name = KBUILD_MODNAME, /* white lie for debug */
		.owner = dev->driver->fops->owner,
		.ops = &drm_gem_prime_dmabuf_ops,
		.size = obj->size,
		.flags = flags,
		.priv = obj,
	};

	if (dev->driver->gem_prime_res_obj)
		exp_info.resv = dev->driver->gem_prime_res_obj(obj);

	return drm_gem_dmabuf_export(dev, &exp_info);
}
EXPORT_SYMBOL(drm_gem_prime_export);
static struct dma_buf *export_and_register_object(struct drm_device *dev,
						  struct drm_gem_object *obj,
						  uint32_t flags)
{
	struct dma_buf *dmabuf;

	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		dmabuf = ERR_PTR(-ENOENT);
		return dmabuf;
	}

	dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		return dmabuf;
	}

	/*
	 * Note that callers do not need to clean up the export cache
	 * since the check for obj->handle_count guarantees that someone
	 * will clean it up.
	 */
	obj->dma_buf = dmabuf;
	get_dma_buf(obj->dma_buf);

	return dmabuf;
}
/**
 * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
 * @dev: dev to export the buffer from
 * @file_priv: drm file-private structure
 * @handle: buffer handle to export
 * @flags: flags like DRM_CLOEXEC
 * @prime_fd: pointer to storage for the fd id of the created dma-buf
 *
 * This is the PRIME export function which GEM drivers must use to ensure
 * correct lifetime management of the underlying GEM object. The actual
 * exporting from GEM object to a dma-buf is done through the
 * gem_prime_export driver callback.
 */
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
			       struct drm_file *file_priv, uint32_t handle,
			       uint32_t flags,
			       int *prime_fd)
{
	struct drm_gem_object *obj;
	int ret = 0;
	struct dma_buf *dmabuf;

	mutex_lock(&file_priv->prime.lock);
	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj)  {
		ret = -ENOENT;
		goto out_unlock;
	}

	dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
	if (dmabuf) {
		get_dma_buf(dmabuf);
		goto out_have_handle;
	}

	mutex_lock(&dev->object_name_lock);
	/* re-export the original imported object */
	if (obj->import_attach) {
		dmabuf = obj->import_attach->dmabuf;
		get_dma_buf(dmabuf);
		goto out_have_obj;
	}

	if (obj->dma_buf) {
		get_dma_buf(obj->dma_buf);
		dmabuf = obj->dma_buf;
		goto out_have_obj;
	}

	dmabuf = export_and_register_object(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		ret = PTR_ERR(dmabuf);
		mutex_unlock(&dev->object_name_lock);
		goto out;
	}

out_have_obj:
	/*
	 * If we've exported this buffer then cheat and add it to the import list
	 * so we get the correct handle back. We must do this under the
	 * protection of dev->object_name_lock to ensure that a racing gem close
	 * ioctl doesn't miss to remove this buffer handle from the cache.
	 */
	ret = drm_prime_add_buf_handle(&file_priv->prime,
				       dmabuf, handle);
	mutex_unlock(&dev->object_name_lock);
	if (ret)
		goto fail_put_dmabuf;

out_have_handle:
	ret = dma_buf_fd(dmabuf, flags);
	/*
	 * We must _not_ remove the buffer from the handle cache since the newly
	 * created dma buf is already linked in the global obj->dma_buf pointer,
	 * and that is invariant as long as a userspace gem handle exists.
	 * Closing the handle will clean out the cache anyway, so we don't leak.
	 */
	if (ret < 0) {
		goto fail_put_dmabuf;
	} else {
		*prime_fd = ret;
		ret = 0;
	}

	goto out;

fail_put_dmabuf:
	dma_buf_put(dmabuf);
out:
	drm_gem_object_put_unlocked(obj);
out_unlock:
	mutex_unlock(&file_priv->prime.lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
/**
 * drm_gem_prime_import - helper library implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 *
 * This is the implementation of the gem_prime_import function for GEM drivers
 * using the PRIME helpers.
 */
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct drm_gem_object *obj;
	int ret;

	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem increases
			 * the refcount on the gem itself instead of the f_count
			 * of the dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	if (!dev->driver->gem_prime_import_sg_table)
		return ERR_PTR(-EINVAL);

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import);
/**
 * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
 * @dev: dev to import the buffer into
 * @file_priv: drm file-private structure
 * @prime_fd: fd id of the dma-buf which should be imported
 * @handle: pointer to storage for the handle of the imported buffer object
 *
 * This is the PRIME import function which GEM drivers must use to ensure
 * correct lifetime management of the underlying GEM object. The actual
 * importing of the GEM object from the dma-buf is done through the
 * gem_prime_import driver callback.
 */
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
			       struct drm_file *file_priv, int prime_fd,
			       uint32_t *handle)
{
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	int ret;

	dma_buf = dma_buf_get(prime_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	mutex_lock(&file_priv->prime.lock);

	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
			dma_buf, handle);
	if (ret == 0)
		goto out_put;

	/* never seen this one, need to import */
	mutex_lock(&dev->object_name_lock);
	obj = dev->driver->gem_prime_import(dev, dma_buf);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_unlock;
	}

	if (obj->dma_buf) {
		WARN_ON(obj->dma_buf != dma_buf);
	} else {
		obj->dma_buf = dma_buf;
		get_dma_buf(dma_buf);
	}

	/* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, handle);
	drm_gem_object_put_unlocked(obj);
	if (ret)
		goto out_put;

	ret = drm_prime_add_buf_handle(&file_priv->prime,
			dma_buf, *handle);
	mutex_unlock(&file_priv->prime.lock);
	if (ret)
		goto fail;

	dma_buf_put(dma_buf);

	return 0;

fail:
	/* If the driver attached, we rely on the free-object path
	 * to detach, which seems OK.
	 */
	drm_gem_handle_delete(file_priv, *handle);
	dma_buf_put(dma_buf);
	return ret;

out_unlock:
	mutex_unlock(&dev->object_name_lock);
out_put:
	mutex_unlock(&file_priv->prime.lock);
	dma_buf_put(dma_buf);
	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_PRIME))
		return -EINVAL;

	if (!dev->driver->prime_handle_to_fd)
		return -ENOSYS;

	/* check flags are valid */
	if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
		return -EINVAL;

	return dev->driver->prime_handle_to_fd(dev, file_priv,
			args->handle, args->flags, &args->fd);
}
int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_PRIME))
		return -EINVAL;

	if (!dev->driver->prime_fd_to_handle)
		return -ENOSYS;

	return dev->driver->prime_fd_to_handle(dev, file_priv,
			args->fd, &args->handle);
}
/**
 * drm_prime_pages_to_sg - converts a page array into an sg list
 * @pages: pointer to the array of page pointers to convert
 * @nr_pages: length of the page vector
 *
 * This helper creates an sg table object from a set of pages; the driver is
 * responsible for mapping the pages into the importer's address space for use
 * with dma_buf itself.
 */
struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
{
	struct sg_table *sg = NULL;
	int ret;

	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!sg) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
				nr_pages << PAGE_SHIFT, GFP_KERNEL);
	if (ret)
		goto out;

	return sg;
out:
	kfree(sg);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);
/**
 * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
 * @sgt: scatter-gather table to convert
 * @pages: array of page pointers to store the page array in
 * @addrs: optional array to store the dma bus address of each page
 * @max_pages: size of both the passed-in arrays
 *
 * Exports an sg table into an array of pages and addresses. This is currently
 * required by the TTM driver in order to do correct fault handling.
 */
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
				     dma_addr_t *addrs, int max_pages)
{
	unsigned count;
	struct scatterlist *sg;
	struct page *page;
	u32 len;
	int pg_index;
	dma_addr_t addr;

	pg_index = 0;
	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
		len = sg->length;
		page = sg_page(sg);
		addr = sg_dma_address(sg);

		/* split each sg entry back into PAGE_SIZE chunks */
		while (len > 0) {
			if (WARN_ON(pg_index >= max_pages))
				return -1;
			pages[pg_index] = page;
			if (addrs)
				addrs[pg_index] = addr;

			page++;
			addr += PAGE_SIZE;
			len -= PAGE_SIZE;
			pg_index++;
		}
	}
	return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
/**
 * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
 * @obj: GEM object which was created from a dma-buf
 * @sg: the sg-table which was pinned at import time
 *
 * This is the cleanup function which GEM drivers need to call when they use
 * drm_gem_prime_import() to import dma-bufs.
 */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;

	attach = obj->import_attach;
	if (sg)
		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	/* remove the reference */
	dma_buf_put(dma_buf);
}
EXPORT_SYMBOL(drm_prime_gem_destroy);
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
	mutex_init(&prime_fpriv->lock);
	prime_fpriv->dmabufs = RB_ROOT;
	prime_fpriv->handles = RB_ROOT;
}
void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
	/* by now drm_gem_release should've made sure the list is empty */
	WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
}