/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and especially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/reservation.h>
#include <linux/mm.h>
#include <linux/mount.h>

#include <uapi/linux/dma-buf.h>
#include <uapi/linux/magic.h>
static inline int is_dma_buf_file(struct file *);

struct dma_buf_list {
	struct list_head head;
	struct mutex lock;
};

static struct dma_buf_list db_list;
static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	struct dma_buf *dmabuf;
	char name[DMA_BUF_NAME_LEN];
	size_t ret = 0;

	dmabuf = dentry->d_fsdata;
	mutex_lock(&dmabuf->lock);
	if (dmabuf->name)
		ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
	mutex_unlock(&dmabuf->lock);

	return dynamic_dname(dentry, buffer, buflen, "/%s:%s",
			     dentry->d_name.name, ret > 0 ? name : "");
}
static const struct dentry_operations dma_buf_dentry_ops = {
	.d_dname = dmabuffs_dname,
};

static struct vfsmount *dma_buf_mnt;
static struct dentry *dma_buf_fs_mount(struct file_system_type *fs_type,
		int flags, const char *name, void *data)
{
	return mount_pseudo(fs_type, "dmabuf:", NULL, &dma_buf_dentry_ops,
			DMA_BUF_MAGIC);
}

static struct file_system_type dma_buf_fs_type = {
	.name = "dmabuf",
	.mount = dma_buf_fs_mount,
	.kill_sb = kill_anon_super,
};
static int dma_buf_release(struct inode *inode, struct file *file)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	BUG_ON(dmabuf->vmapping_counter);

	/*
	 * Any fences that a dma-buf poll can wait on should be signaled
	 * before releasing dma-buf. This is the responsibility of each
	 * driver that uses the reservation objects.
	 *
	 * If you hit this BUG() it means someone dropped their ref to the
	 * dma-buf while still having pending operations on the buffer.
	 */
	BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);

	dmabuf->ops->release(dmabuf);

	mutex_lock(&db_list.lock);
	list_del(&dmabuf->list_node);
	mutex_unlock(&db_list.lock);

	if (dmabuf->resv == (struct reservation_object *)&dmabuf[1])
		reservation_object_fini(dmabuf->resv);

	module_put(dmabuf->owner);
	kfree(dmabuf);
	return 0;
}
static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	/* check if buffer supports mmap */
	if (!dmabuf->ops->mmap)
		return -EINVAL;

	/* check for overflowing the buffer's size */
	if (vma->vm_pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	return dmabuf->ops->mmap(dmabuf, vma);
}
static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
{
	struct dma_buf *dmabuf;
	loff_t base;

	if (!is_dma_buf_file(file))
		return -EBADF;

	dmabuf = file->private_data;

	/* only support discovering the end of the buffer,
	   but also allow SEEK_SET to maintain the idiomatic
	   SEEK_END(0), SEEK_CUR(0) pattern */
	if (whence == SEEK_END)
		base = dmabuf->size;
	else if (whence == SEEK_SET)
		base = 0;
	else
		return -EINVAL;

	if (offset != 0)
		return -EINVAL;

	return base + offset;
}
/**
 * DOC: fence polling
 *
 * To support cross-device and cross-driver synchronization of buffer access
 * implicit fences (represented internally in the kernel with &struct dma_fence)
 * can be attached to a &dma_buf. The glue for that and a few related things are
 * provided in the &reservation_object structure.
 *
 * Userspace can query the state of these implicitly tracked fences using poll()
 * and related system calls (an illustrative snippet follows this comment):
 *
 * - Checking for EPOLLIN, i.e. read access, can be used to query the state of
 *   the most recent write or exclusive fence.
 *
 * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
 *   all attached fences, shared and exclusive ones.
 *
 * Note that this only signals the completion of the respective fences, i.e. the
 * DMA transfers are complete. Cache flushing and any other necessary
 * preparations before CPU access can begin still need to happen.
 */
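
/*
 * A minimal userspace sketch of the above semantics (illustrative only, not
 * part of this file's interfaces; assumes "fd" is a dma-buf file descriptor
 * obtained from an exporter, and read_back_results()/reuse_buffer() are
 * hypothetical application helpers):
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };
 *
 *	if (poll(&pfd, 1, -1) > 0) {
 *		if (pfd.revents & POLLIN)
 *			read_back_results();	// exclusive/write fence signaled
 *		if (pfd.revents & POLLOUT)
 *			reuse_buffer();		// all fences signaled
 *	}
 *
 * Remember that fence completion alone does not imply CPU cache coherency;
 * see the note above.
 */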
static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
	unsigned long flags;

	spin_lock_irqsave(&dcb->poll->lock, flags);
	wake_up_locked_poll(dcb->poll, dcb->active);
	dcb->active = 0;
	spin_unlock_irqrestore(&dcb->poll->lock, flags);
}
static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
{
	struct dma_buf *dmabuf;
	struct reservation_object *resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence_excl;
	__poll_t events;
	unsigned shared_count, seq;

	dmabuf = file->private_data;
	if (!dmabuf || !dmabuf->resv)
		return EPOLLERR;

	resv = dmabuf->resv;

	poll_wait(file, &dmabuf->poll, poll);

	events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
	if (!events)
		return 0;

retry:
	seq = read_seqcount_begin(&resv->seq);
	rcu_read_lock();

	fobj = rcu_dereference(resv->fence);
	if (fobj)
		shared_count = fobj->shared_count;
	else
		shared_count = 0;
	fence_excl = rcu_dereference(resv->fence_excl);
	if (read_seqcount_retry(&resv->seq, seq)) {
		rcu_read_unlock();
		goto retry;
	}

	if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
		__poll_t pevents = EPOLLIN;

		if (shared_count == 0)
			pevents |= EPOLLOUT;

		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active) {
			dcb->active |= pevents;
			events &= ~pevents;
		} else
			dcb->active = pevents;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (events & pevents) {
			if (!dma_fence_get_rcu(fence_excl)) {
				/* force a recheck */
				events &= ~pevents;
				dma_buf_poll_cb(NULL, &dcb->cb);
			} else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
							   dma_buf_poll_cb)) {
				events &= ~pevents;
				dma_fence_put(fence_excl);
			} else {
				/*
				 * No callback queued, wake up any additional
				 * waiters.
				 */
				dma_fence_put(fence_excl);
				dma_buf_poll_cb(NULL, &dcb->cb);
			}
		}
	}

	if ((events & EPOLLOUT) && shared_count > 0) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
		int i;

		/* Only queue a new callback if no event has fired yet */
		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active)
			events &= ~EPOLLOUT;
		else
			dcb->active = EPOLLOUT;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (!(events & EPOLLOUT))
			goto out;

		for (i = 0; i < shared_count; ++i) {
			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);

			if (!dma_fence_get_rcu(fence)) {
				/*
				 * fence refcount dropped to zero, this means
				 * that fobj has been freed
				 *
				 * call dma_buf_poll_cb and force a recheck!
				 */
				events &= ~EPOLLOUT;
				dma_buf_poll_cb(NULL, &dcb->cb);
				break;
			}
			if (!dma_fence_add_callback(fence, &dcb->cb,
						    dma_buf_poll_cb)) {
				dma_fence_put(fence);
				events &= ~EPOLLOUT;
				break;
			}
			dma_fence_put(fence);
		}

		/* No callback queued, wake up any additional waiters. */
		if (i == shared_count)
			dma_buf_poll_cb(NULL, &dcb->cb);
	}

out:
	rcu_read_unlock();
	return events;
}
/**
 * dma_buf_set_name - Set a name to a specific dma_buf to track the usage.
 * The name of the dma-buf buffer can only be set when the dma-buf is not
 * attached to any devices. It could theoretically support changing the
 * name of the dma-buf if the same piece of memory is used for multiple
 * purposes between different devices.
 *
 * @dmabuf: [in]     dmabuf buffer that will be renamed.
 * @buf:    [in]     A piece of userspace memory that contains the name of
 *                   the dma-buf.
 *
 * Returns 0 on success. If the dma-buf buffer is already attached to
 * devices, return -EBUSY.
 *
 */
static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
{
	char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
	long ret = 0;

	if (IS_ERR(name))
		return PTR_ERR(name);

	mutex_lock(&dmabuf->lock);
	if (!list_empty(&dmabuf->attachments)) {
		ret = -EBUSY;
		kfree(name);
		goto out_unlock;
	}
	kfree(dmabuf->name);
	dmabuf->name = name;

out_unlock:
	mutex_unlock(&dmabuf->lock);
	return ret;
}
static long dma_buf_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct dma_buf *dmabuf;
	struct dma_buf_sync sync;
	enum dma_data_direction direction;
	int ret;

	dmabuf = file->private_data;

	switch (cmd) {
	case DMA_BUF_IOCTL_SYNC:
		if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
			return -EFAULT;

		if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
			return -EINVAL;

		switch (sync.flags & DMA_BUF_SYNC_RW) {
		case DMA_BUF_SYNC_READ:
			direction = DMA_FROM_DEVICE;
			break;
		case DMA_BUF_SYNC_WRITE:
			direction = DMA_TO_DEVICE;
			break;
		case DMA_BUF_SYNC_RW:
			direction = DMA_BIDIRECTIONAL;
			break;
		default:
			return -EINVAL;
		}

		if (sync.flags & DMA_BUF_SYNC_END)
			ret = dma_buf_end_cpu_access(dmabuf, direction);
		else
			ret = dma_buf_begin_cpu_access(dmabuf, direction);

		return ret;

	case DMA_BUF_SET_NAME:
		return dma_buf_set_name(dmabuf, (const char __user *)arg);

	default:
		return -ENOTTY;
	}
}
static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
{
	struct dma_buf *dmabuf = file->private_data;

	seq_printf(m, "size:\t%zu\n", dmabuf->size);
	/* Don't count the temporary reference taken inside procfs seq_show */
	seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
	seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
	mutex_lock(&dmabuf->lock);
	if (dmabuf->name)
		seq_printf(m, "name:\t%s\n", dmabuf->name);
	mutex_unlock(&dmabuf->lock);
}
static const struct file_operations dma_buf_fops = {
	.release	= dma_buf_release,
	.mmap		= dma_buf_mmap_internal,
	.llseek		= dma_buf_llseek,
	.poll		= dma_buf_poll,
	.unlocked_ioctl	= dma_buf_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= dma_buf_ioctl,
#endif
	.show_fdinfo	= dma_buf_show_fdinfo,
};
/*
 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 */
static inline int is_dma_buf_file(struct file *file)
{
	return file->f_op == &dma_buf_fops;
}
static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
{
	struct file *file;
	struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);

	if (IS_ERR(inode))
		return ERR_CAST(inode);

	inode->i_size = dmabuf->size;
	inode_set_bytes(inode, dmabuf->size);

	file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
				 flags, &dma_buf_fops);
	if (IS_ERR(file))
		goto err_alloc_file;
	file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
	file->private_data = dmabuf;
	file->f_path.dentry->d_fsdata = dmabuf;

	return file;

err_alloc_file:
	iput(inode);
	return file;
}
/**
 * DOC: dma buf device access
 *
 * For device DMA access to a shared DMA buffer the usual sequence of operations
 * is fairly simple:
 *
 * 1. The exporter defines its exporter instance using
 *    DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
 *    buffer object into a &dma_buf. It then exports that &dma_buf to userspace
 *    as a file descriptor by calling dma_buf_fd().
 *
 * 2. Userspace passes this file descriptor to all drivers it wants this buffer
 *    to share with: First the file descriptor is converted to a &dma_buf using
 *    dma_buf_get(). Then the buffer is attached to the device using
 *    dma_buf_attach().
 *
 *    Up to this stage the exporter is still free to migrate or reallocate the
 *    backing storage.
 *
 * 3. Once the buffer is attached to all devices userspace can initiate DMA
 *    access to the shared buffer. In the kernel this is done by calling
 *    dma_buf_map_attachment() and dma_buf_unmap_attachment().
 *
 * 4. Once a driver is done with a shared buffer it needs to call
 *    dma_buf_detach() (after cleaning up any mappings) and then release the
 *    reference acquired with dma_buf_get() by calling dma_buf_put().
 *
 * A minimal importer-side sketch of steps 2-4 follows this comment. For the
 * detailed semantics exporters are expected to implement see &dma_buf_ops.
 */
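
/*
 * Illustrative importer-side sketch of steps 2-4 above (hypothetical driver
 * code with error handling trimmed; "my_dev" and "fd" are assumptions of the
 * example):
 *
 *	struct dma_buf *dmabuf = dma_buf_get(fd);
 *	struct dma_buf_attachment *attach;
 *	struct sg_table *sgt;
 *
 *	attach = dma_buf_attach(dmabuf, my_dev);
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *
 *	... program the device DMA using the sgt entries ...
 *
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dmabuf, attach);
 *	dma_buf_put(dmabuf);
 */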
/**
 * dma_buf_export - Creates a new dma_buf, and associates an anon file
 * with this buffer, so it can be exported.
 * Also connect the allocator-specific data and ops to the buffer.
 * Additionally, provide a name string for exporter; useful in debugging.
 *
 * @exp_info:	[in]	holds all the export related information provided
 *			by the exporter. see &struct dma_buf_export_info
 *			for further details.
 *
 * Returns, on success, a newly created dma_buf object, which wraps the
 * supplied private data and operations for dma_buf_ops. On either missing
 * ops, or error in allocating struct dma_buf, will return negative error.
 *
 * For most cases the easiest way to create @exp_info is through the
 * %DEFINE_DMA_BUF_EXPORT_INFO macro.
 */
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
{
	struct dma_buf *dmabuf;
	struct reservation_object *resv = exp_info->resv;
	struct file *file;
	size_t alloc_size = sizeof(struct dma_buf);
	int ret;

	if (!exp_info->resv)
		alloc_size += sizeof(struct reservation_object);
	else
		/* prevent &dma_buf[1] == dma_buf->resv */
		alloc_size += 1;

	if (WARN_ON(!exp_info->priv
			  || !exp_info->ops
			  || !exp_info->ops->map_dma_buf
			  || !exp_info->ops->unmap_dma_buf
			  || !exp_info->ops->release)) {
		return ERR_PTR(-EINVAL);
	}

	if (!try_module_get(exp_info->owner))
		return ERR_PTR(-ENOENT);

	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
	if (!dmabuf) {
		ret = -ENOMEM;
		goto err_module;
	}

	dmabuf->priv = exp_info->priv;
	dmabuf->ops = exp_info->ops;
	dmabuf->size = exp_info->size;
	dmabuf->exp_name = exp_info->exp_name;
	dmabuf->owner = exp_info->owner;
	init_waitqueue_head(&dmabuf->poll);
	dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;

	if (!resv) {
		resv = (struct reservation_object *)&dmabuf[1];
		reservation_object_init(resv);
	}
	dmabuf->resv = resv;

	file = dma_buf_getfile(dmabuf, exp_info->flags);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto err_dmabuf;
	}

	file->f_mode |= FMODE_LSEEK;
	dmabuf->file = file;

	mutex_init(&dmabuf->lock);
	INIT_LIST_HEAD(&dmabuf->attachments);

	mutex_lock(&db_list.lock);
	list_add(&dmabuf->list_node, &db_list.head);
	mutex_unlock(&db_list.lock);

	return dmabuf;

err_dmabuf:
	kfree(dmabuf);
err_module:
	module_put(exp_info->owner);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_export);
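
/*
 * A minimal exporter-side sketch (hypothetical; "my_ops", "my_priv" and
 * "size" are assumptions of the example, and the ops struct must provide at
 * least map_dma_buf, unmap_dma_buf and release, as checked above):
 *
 *	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *	struct dma_buf *dmabuf;
 *	int fd;
 *
 *	exp_info.ops = &my_ops;
 *	exp_info.size = size;
 *	exp_info.flags = O_CLOEXEC;
 *	exp_info.priv = my_priv;
 *
 *	dmabuf = dma_buf_export(&exp_info);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *
 *	fd = dma_buf_fd(dmabuf, O_CLOEXEC);	// hand the fd to userspace
 */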
/**
 * dma_buf_fd - returns a file descriptor for the given dma_buf
 * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
 * @flags:	[in]	flags to give to fd
 *
 * On success, returns an associated 'fd'. Else, returns error.
 */
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	int fd;

	if (!dmabuf || !dmabuf->file)
		return -EINVAL;

	fd = get_unused_fd_flags(flags);
	if (fd < 0)
		return fd;

	fd_install(fd, dmabuf->file);

	return fd;
}
EXPORT_SYMBOL_GPL(dma_buf_fd);
/**
 * dma_buf_get - returns the dma_buf structure related to an fd
 * @fd:	[in]	fd associated with the dma_buf to be returned
 *
 * On success, returns the dma_buf structure associated with an fd; uses
 * file's refcounting done by fget to increase refcount. Returns ERR_PTR
 * otherwise.
 */
struct dma_buf *dma_buf_get(int fd)
{
	struct file *file;

	file = fget(fd);

	if (!file)
		return ERR_PTR(-EBADF);

	if (!is_dma_buf_file(file)) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file->private_data;
}
EXPORT_SYMBOL_GPL(dma_buf_get);
/**
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf:	[in]	buffer to reduce refcount of
 *
 * Uses file's refcounting done implicitly by fput().
 *
 * If, as a result of this call, the refcount becomes 0, the 'release' file
 * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
 * in turn, and frees the memory allocated for dmabuf when exported.
 */
void dma_buf_put(struct dma_buf *dmabuf)
{
	if (WARN_ON(!dmabuf || !dmabuf->file))
		return;

	fput(dmabuf->file);
}
EXPORT_SYMBOL_GPL(dma_buf_put);
/**
 * dma_buf_attach - Add the device to dma_buf's attachments list; optionally,
 * calls attach() of dma_buf_ops to allow device-specific attach functionality
 * @dmabuf:	[in]	buffer to attach device to.
 * @dev:	[in]	device to be attached.
 *
 * Returns struct dma_buf_attachment pointer for this attachment. Attachments
 * must be cleaned up by calling dma_buf_detach().
 *
 * Returns:
 *
 * A pointer to newly created &dma_buf_attachment on success, or a negative
 * error code wrapped into a pointer on failure.
 *
 * Note that this can fail if the backing storage of @dmabuf is in a place not
 * accessible to @dev, and cannot be moved to a more suitable place. This is
 * indicated with the error code -EBUSY.
 */
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev)
{
	struct dma_buf_attachment *attach;
	int ret;

	if (WARN_ON(!dmabuf || !dev))
		return ERR_PTR(-EINVAL);

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return ERR_PTR(-ENOMEM);

	attach->dev = dev;
	attach->dmabuf = dmabuf;

	mutex_lock(&dmabuf->lock);

	if (dmabuf->ops->attach) {
		ret = dmabuf->ops->attach(dmabuf, attach);
		if (ret)
			goto err_attach;
	}
	list_add(&attach->node, &dmabuf->attachments);

	mutex_unlock(&dmabuf->lock);

	return attach;

err_attach:
	kfree(attach);
	mutex_unlock(&dmabuf->lock);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_attach);
/**
 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list;
 * optionally calls detach() of dma_buf_ops for device-specific detach
 * @dmabuf:	[in]	buffer to detach from.
 * @attach:	[in]	attachment to be detached; is free'd after this call.
 *
 * Clean up a device attachment obtained by calling dma_buf_attach().
 */
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
	if (WARN_ON(!dmabuf || !attach))
		return;

	if (attach->sgt)
		dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);

	mutex_lock(&dmabuf->lock);
	list_del(&attach->node);
	if (dmabuf->ops->detach)
		dmabuf->ops->detach(dmabuf, attach);

	mutex_unlock(&dmabuf->lock);
	kfree(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_detach);
/**
 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:	[in]	attachment whose scatterlist is to be returned
 * @direction:	[in]	direction of DMA transfer
 *
 * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
 * on error. May return -EINTR if it is interrupted by a signal.
 *
 * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
 * the underlying backing storage is pinned for as long as a mapping exists,
 * therefore users/importers should not hold onto a mapping for undue amounts of
 * time.
 */
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
					enum dma_data_direction direction)
{
	struct sg_table *sg_table;

	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf))
		return ERR_PTR(-EINVAL);

	if (attach->sgt) {
		/*
		 * Two mappings with different directions for the same
		 * attachment are not allowed.
		 */
		if (attach->dir != direction &&
		    attach->dir != DMA_BIDIRECTIONAL)
			return ERR_PTR(-EBUSY);

		return attach->sgt;
	}

	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
	if (!sg_table)
		sg_table = ERR_PTR(-ENOMEM);

	if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
		attach->sgt = sg_table;
		attach->dir = direction;
	}

	return sg_table;
}
EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
/**
 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
 * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
 * dma_buf_ops.
 * @attach:	[in]	attachment to unmap buffer from
 * @sg_table:	[in]	scatterlist info of the buffer to unmap
 * @direction:	[in]	direction of DMA transfer
 *
 * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
 */
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
			      struct sg_table *sg_table,
			      enum dma_data_direction direction)
{
	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
		return;

	if (attach->sgt == sg_table)
		return;

	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
/**
 * DOC: cpu access
 *
 * There are multiple reasons for supporting CPU access to a dma buffer object:
 *
 * - Fallback operations in the kernel, for example when a device is connected
 *   over USB and the kernel needs to shuffle the data around first before
 *   sending it away. Cache coherency is handled by bracketing any transactions
 *   with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access().
 *
 *   To support dma_buf objects residing in highmem cpu access is page-based
 *   using an api similar to kmap. Accessing a dma_buf is done in aligned chunks
 *   of PAGE_SIZE size. Before accessing a chunk it needs to be mapped, which
 *   returns a pointer in kernel virtual address space. Afterwards the chunk
 *   needs to be unmapped again. There is no limit on how often a given chunk
 *   can be mapped and unmapped, i.e. the importer does not need to call
 *   begin_cpu_access again before mapping the same chunk again.
 *
 *   Interfaces::
 *      void \*dma_buf_kmap(struct dma_buf \*, unsigned long);
 *      void dma_buf_kunmap(struct dma_buf \*, unsigned long, void \*);
 *
 *   Implementing the functions is optional for exporters and for importers all
 *   the restrictions of using kmap apply.
 *
 *   dma_buf kmap calls outside of the range specified in begin_cpu_access are
 *   undefined. If the range is not PAGE_SIZE aligned, kmap needs to succeed on
 *   the partial chunks at the beginning and end but may return stale or bogus
 *   data outside of the range (in these partial chunks).
 *
 *   For some cases where the overhead of kmap can be too high, a vmap interface
 *   is introduced. This interface should be used very carefully, as vmalloc
 *   space is a limited resource on many architectures.
 *
 *   Interfaces::
 *      void \*dma_buf_vmap(struct dma_buf \*dmabuf)
 *      void dma_buf_vunmap(struct dma_buf \*dmabuf, void \*vaddr)
 *
 *   The vmap call can fail if there is no vmap support in the exporter, or if
 *   it runs out of vmalloc space. Fallback to kmap should be implemented. Note
 *   that the dma-buf layer keeps a reference count for all vmap access and
 *   calls down into the exporter's vmap function only when no vmapping exists,
 *   and only unmaps it once. Protection against concurrent vmap/vunmap calls is
 *   provided by taking the dma_buf->lock mutex.
 *
 * - For full compatibility on the importer side with existing userspace
 *   interfaces, which might already support mmap'ing buffers. This is needed in
 *   many processing pipelines (e.g. feeding a software rendered image into a
 *   hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
 *   framework already supported this and for DMA buffer file descriptors to
 *   replace ION buffers mmap support was needed.
 *
 *   There is no special interface, userspace simply calls mmap on the dma-buf
 *   fd. But like for CPU access there's a need to bracket the actual access,
 *   which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
 *   DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
 *   be restarted.
 *
 *   Some systems might need some sort of cache coherency management e.g. when
 *   CPU and GPU domains are being accessed through dma-buf at the same time.
 *   To circumvent this problem there are begin/end coherency markers, that
 *   forward directly to existing dma-buf device drivers vfunc hooks. Userspace
 *   can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The
 *   sequence would be used like following (a sketch of it follows this
 *   comment):
 *
 *     - mmap dma-buf fd
 *     - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write
 *       to mmap area 3. SYNC_END ioctl. This can be repeated as often as you
 *       want (with the new data being consumed by say the GPU or the scanout
 *       device)
 *     - munmap once you don't need the buffer any more
 *
 *    For correctness and optimal performance, it is always required to use
 *    SYNC_START and SYNC_END before and after, respectively, when accessing the
 *    mapped address. Userspace cannot rely on coherent access, even when there
 *    are systems where it just works without calling these ioctls.
 *
 * - And as a CPU fallback in userspace processing pipelines.
 *
 *   Similar to the motivation for kernel cpu access it is again important that
 *   the userspace code of a given importing subsystem can use the same
 *   interfaces with an imported dma-buf buffer object as with a native buffer
 *   object. This is especially important for drm where the userspace part of
 *   contemporary OpenGL, X, and other drivers is huge, and reworking them to
 *   use a different way to mmap a buffer would be rather invasive.
 *
 *   The assumption in the current dma-buf interfaces is that redirecting the
 *   initial mmap is all that's needed. A survey of some of the existing
 *   subsystems shows that no driver seems to do any nefarious thing like
 *   syncing up with outstanding asynchronous processing on the device or
 *   allocating special resources at fault time. So hopefully this is good
 *   enough, since adding interfaces to intercept pagefaults and allow pte
 *   shootdowns would increase the complexity quite a bit.
 *
 *   Interface::
 *      int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
 *		       unsigned long);
 *
 *   If the importing subsystem simply provides a special-purpose mmap call to
 *   set up a mapping in userspace, calling do_mmap with dma_buf->file will
 *   equally achieve that for a dma-buf object.
 */
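
/*
 * A userspace sketch of the SYNC_START/SYNC_END bracketing described above
 * (illustrative only; assumes "fd" is a dma-buf file descriptor and "len" is
 * the buffer size):
 *
 *	struct dma_buf_sync sync = { 0 };
 *	void *ptr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 *	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW;
 *	ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);	// restart on -EAGAIN/-EINTR
 *
 *	... read from or write to ptr ...
 *
 *	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
 *	ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
 *
 *	munmap(ptr, len);
 */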
static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	bool write = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_TO_DEVICE);
	struct reservation_object *resv = dmabuf->resv;
	long ret;

	/* Wait on any implicit rendering fences */
	ret = reservation_object_wait_timeout_rcu(resv, write, true,
						  MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;

	return 0;
}
/**
 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
 * preparations. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to prepare cpu access for.
 * @direction:	[in]	direction of the cpu access.
 *
 * After the cpu access is complete the caller should call
 * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
 * it guaranteed to be coherent with other DMA access.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
			     enum dma_data_direction direction)
{
	int ret = 0;

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	if (dmabuf->ops->begin_cpu_access)
		ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);

	/* Ensure that all fences are waited upon - but we first allow
	 * the native handler the chance to do so more efficiently if it
	 * chooses. A double invocation here will be reasonably cheap no-op.
	 */
	if (ret == 0)
		ret = __dma_buf_begin_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
/**
 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
 * actions. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to complete cpu access for.
 * @direction:	[in]	direction of the cpu access.
 *
 * This terminates CPU access started with dma_buf_begin_cpu_access().
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
			   enum dma_data_direction direction)
{
	int ret = 0;

	WARN_ON(!dmabuf);

	if (dmabuf->ops->end_cpu_access)
		ret = dmabuf->ops->end_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
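
/*
 * A minimal kernel-side sketch of the bracketing contract (illustrative;
 * assumes "dmabuf" refers to an attached buffer and that page 0 lies inside
 * the range prepared below):
 *
 *	void *vaddr;
 *
 *	if (dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE))
 *		return;		// preparation failed, do not touch the buffer
 *
 *	vaddr = dma_buf_kmap(dmabuf, 0);
 *	if (vaddr) {
 *		... read at most PAGE_SIZE bytes through vaddr ...
 *		dma_buf_kunmap(dmabuf, 0, vaddr);
 *	}
 *
 *	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 */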
/**
 * dma_buf_kmap - Map a page of the buffer object into kernel address space. The
 * same restrictions as for kmap and friends apply.
 * @dmabuf:	[in]	buffer to map page from.
 * @page_num:	[in]	page in PAGE_SIZE units to map.
 *
 * This call must always succeed, any necessary preparations that might fail
 * need to be done in begin_cpu_access.
 */
void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
{
	WARN_ON(!dmabuf);

	if (!dmabuf->ops->map)
		return NULL;
	return dmabuf->ops->map(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap);
/**
 * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap.
 * @dmabuf:	[in]	buffer to unmap page from.
 * @page_num:	[in]	page in PAGE_SIZE units to unmap.
 * @vaddr:	[in]	kernel space pointer obtained from dma_buf_kmap.
 *
 * This call must always succeed.
 */
void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
		    void *vaddr)
{
	WARN_ON(!dmabuf);

	if (dmabuf->ops->unmap)
		dmabuf->ops->unmap(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap);
/**
 * dma_buf_mmap - Sets up a userspace mmap with the given vma
 * @dmabuf:	[in]	buffer that should back the vma
 * @vma:	[in]	vma for the mmap
 * @pgoff:	[in]	offset in pages where this mmap should start within the
 *			dma-buf buffer.
 *
 * This function adjusts the passed in vma so that it points at the file of the
 * dma_buf operation. It also adjusts the starting pgoff and does bounds
 * checking on the size of the vma. Then it calls the exporter's mmap function
 * to set up the mapping.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
		 unsigned long pgoff)
{
	struct file *oldfile;
	int ret;

	if (WARN_ON(!dmabuf || !vma))
		return -EINVAL;

	/* check if buffer supports mmap */
	if (!dmabuf->ops->mmap)
		return -EINVAL;

	/* check for offset overflow */
	if (pgoff + vma_pages(vma) < pgoff)
		return -EOVERFLOW;

	/* check for overflowing the buffer's size */
	if (pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	/* readjust the vma */
	get_file(dmabuf->file);
	oldfile = vma->vm_file;
	vma->vm_file = dmabuf->file;
	vma->vm_pgoff = pgoff;

	ret = dmabuf->ops->mmap(dmabuf, vma);
	if (ret) {
		/* restore old parameters on failure */
		vma->vm_file = oldfile;
		fput(dmabuf->file);
	} else {
		if (oldfile)
			fput(oldfile);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_mmap);
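
/*
 * Illustrative importer-side use: a hypothetical driver mmap handler that
 * simply redirects to the dma-buf, as suggested in the DOC above ("my_obj"
 * is an assumption of the example):
 *
 *	static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_obj *obj = file->private_data;
 *
 *		return dma_buf_mmap(obj->dmabuf, vma, 0);
 *	}
 */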
/**
 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
 * address space. Same restrictions as for vmap and friends apply.
 * @dmabuf:	[in]	buffer to vmap
 *
 * This call may fail due to lack of virtual mapping address space.
 * These calls are optional in drivers. The intended use for them
 * is for mapping objects linear in kernel space for high use objects.
 * Please attempt to use kmap/kunmap before thinking about these interfaces.
 *
 * Returns NULL on error.
 */
void *dma_buf_vmap(struct dma_buf *dmabuf)
{
	void *ptr;

	if (WARN_ON(!dmabuf))
		return NULL;

	if (!dmabuf->ops->vmap)
		return NULL;

	mutex_lock(&dmabuf->lock);
	if (dmabuf->vmapping_counter) {
		dmabuf->vmapping_counter++;
		BUG_ON(!dmabuf->vmap_ptr);
		ptr = dmabuf->vmap_ptr;
		goto out_unlock;
	}

	BUG_ON(dmabuf->vmap_ptr);

	ptr = dmabuf->ops->vmap(dmabuf);
	if (WARN_ON_ONCE(IS_ERR(ptr)))
		ptr = NULL;
	if (!ptr)
		goto out_unlock;

	dmabuf->vmap_ptr = ptr;
	dmabuf->vmapping_counter = 1;

out_unlock:
	mutex_unlock(&dmabuf->lock);
	return ptr;
}
EXPORT_SYMBOL_GPL(dma_buf_vmap);
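
/*
 * Illustrative pairing of the refcounted vmap interface (a sketch; a real
 * importer must keep dma_buf_vmap() and dma_buf_vunmap() strictly balanced):
 *
 *	void *vaddr = dma_buf_vmap(dmabuf);
 *
 *	if (!vaddr)
 *		return -ENOMEM;	// fall back to per-page dma_buf_kmap() access
 *
 *	... access the buffer linearly through vaddr ...
 *
 *	dma_buf_vunmap(dmabuf, vaddr);
 */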
/**
 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf:	[in]	buffer to vunmap
 * @vaddr:	[in]	vmap pointer to vunmap
 */
void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
	if (WARN_ON(!dmabuf))
		return;

	BUG_ON(!dmabuf->vmap_ptr);
	BUG_ON(dmabuf->vmapping_counter == 0);
	BUG_ON(dmabuf->vmap_ptr != vaddr);

	mutex_lock(&dmabuf->lock);
	if (--dmabuf->vmapping_counter == 0) {
		if (dmabuf->ops->vunmap)
			dmabuf->ops->vunmap(dmabuf, vaddr);
		dmabuf->vmap_ptr = NULL;
	}
	mutex_unlock(&dmabuf->lock);
}
EXPORT_SYMBOL_GPL(dma_buf_vunmap);
#ifdef CONFIG_DEBUG_FS
static int dma_buf_debug_show(struct seq_file *s, void *unused)
{
	int ret;
	struct dma_buf *buf_obj;
	struct dma_buf_attachment *attach_obj;
	struct reservation_object *robj;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	unsigned seq;
	int count = 0, attach_count, shared_count, i;
	size_t size = 0;

	ret = mutex_lock_interruptible(&db_list.lock);
	if (ret)
		return ret;

	seq_puts(s, "\nDma-buf Objects:\n");
	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\n",
		   "size", "flags", "mode", "count", "ino");

	list_for_each_entry(buf_obj, &db_list.head, list_node) {
		ret = mutex_lock_interruptible(&buf_obj->lock);

		if (ret) {
			seq_puts(s,
				 "\tERROR locking buffer object: skipping\n");
			continue;
		}

		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
				buf_obj->size,
				buf_obj->file->f_flags, buf_obj->file->f_mode,
				file_count(buf_obj->file),
				buf_obj->exp_name,
				file_inode(buf_obj->file)->i_ino,
				buf_obj->name ?: "");

		robj = buf_obj->resv;
		while (true) {
			seq = read_seqcount_begin(&robj->seq);
			rcu_read_lock();
			fobj = rcu_dereference(robj->fence);
			shared_count = fobj ? fobj->shared_count : 0;
			fence = rcu_dereference(robj->fence_excl);
			if (!read_seqcount_retry(&robj->seq, seq))
				break;
			rcu_read_unlock();
		}

		if (fence)
			seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
				   fence->ops->get_driver_name(fence),
				   fence->ops->get_timeline_name(fence),
				   dma_fence_is_signaled(fence) ? "" : "un");
		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			if (!dma_fence_get_rcu(fence))
				continue;
			seq_printf(s, "\tShared fence: %s %s %ssignalled\n",
				   fence->ops->get_driver_name(fence),
				   fence->ops->get_timeline_name(fence),
				   dma_fence_is_signaled(fence) ? "" : "un");
			dma_fence_put(fence);
		}
		rcu_read_unlock();

		seq_puts(s, "\tAttached Devices:\n");
		attach_count = 0;

		list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
			seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
			attach_count++;
		}

		seq_printf(s, "Total %d devices attached\n\n",
				attach_count);

		count++;
		size += buf_obj->size;
		mutex_unlock(&buf_obj->lock);
	}

	seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);

	mutex_unlock(&db_list.lock);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);
static struct dentry *dma_buf_debugfs_dir;

static int dma_buf_init_debugfs(void)
{
	struct dentry *d;
	int err = 0;

	d = debugfs_create_dir("dma_buf", NULL);
	if (IS_ERR(d))
		return PTR_ERR(d);

	dma_buf_debugfs_dir = d;

	d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
				NULL, &dma_buf_debug_fops);
	if (IS_ERR(d)) {
		pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
		debugfs_remove_recursive(dma_buf_debugfs_dir);
		dma_buf_debugfs_dir = NULL;
		err = PTR_ERR(d);
	}

	return err;
}

static void dma_buf_uninit_debugfs(void)
{
	debugfs_remove_recursive(dma_buf_debugfs_dir);
}
#else
static inline int dma_buf_init_debugfs(void)
{
	return 0;
}
static inline void dma_buf_uninit_debugfs(void)
{
}
#endif
static int __init dma_buf_init(void)
{
	dma_buf_mnt = kern_mount(&dma_buf_fs_type);
	if (IS_ERR(dma_buf_mnt))
		return PTR_ERR(dma_buf_mnt);

	mutex_init(&db_list.lock);
	INIT_LIST_HEAD(&db_list.head);
	dma_buf_init_debugfs();
	return 0;
}
subsys_initcall(dma_buf_init);

static void __exit dma_buf_deinit(void)
{
	dma_buf_uninit_debugfs();
	kern_unmount(dma_buf_mnt);
}
__exitcall(dma_buf_deinit);