/*
 * Copyright 2017 Red Hat
 * Parts ported from amdgpu (fence wait code).
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
/**
 * DOC: Overview
 *
 * DRM synchronisation objects (syncobj, see struct &drm_syncobj) provide a
 * container for a synchronization primitive which can be used by userspace
 * to explicitly synchronize GPU commands, can be shared between userspace
 * processes, and can be shared between different DRM drivers.
 * Their primary use-case is to implement Vulkan fences and semaphores.
 * The syncobj userspace API provides ioctls for several operations:
 *
 *  - Creation and destruction of syncobjs
 *  - Import and export of syncobjs to/from a syncobj file descriptor
 *  - Import and export a syncobj's underlying fence to/from a sync file
 *  - Reset a syncobj (set its fence to NULL)
 *  - Signal a syncobj (set a trivially signaled fence)
 *  - Wait for a syncobj's fence to appear and be signaled
 *
 * At its core, a syncobj is simply a wrapper around a pointer to a struct
 * &dma_fence which may be NULL.
 * When a syncobj is first created, its pointer is either NULL or a pointer
 * to an already signaled fence depending on whether the
 * &DRM_SYNCOBJ_CREATE_SIGNALED flag is passed to
 * &DRM_IOCTL_SYNCOBJ_CREATE.
 * When GPU work which signals a syncobj is enqueued in a DRM driver,
 * the syncobj fence is replaced with a fence which will be signaled by the
 * completion of that work.
 * When GPU work which waits on a syncobj is enqueued in a DRM driver, the
 * driver retrieves the syncobj's current fence at the time the work is
 * enqueued and waits on that fence before submitting the work to hardware.
 * If the syncobj's fence is NULL, the enqueue operation is expected to fail.
 * All manipulation of the syncobj's fence happens in terms of the current
 * fence at the time the ioctl is called by userspace regardless of whether
 * that operation is an immediate host-side operation (signal or reset) or
 * an operation which is enqueued in some driver queue.
 * &DRM_IOCTL_SYNCOBJ_RESET and &DRM_IOCTL_SYNCOBJ_SIGNAL can be used to
 * manipulate a syncobj from the host by resetting its pointer to NULL or
 * setting its pointer to a fence which is already signaled.
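 *
 * As a hedged illustration only (not code from this file), a userspace
 * client might exercise these host-side operations roughly as follows;
 * error handling is omitted and an open DRM fd ``drm_fd`` is assumed::
 *
 *	struct drm_syncobj_create create = { .flags = 0 };
 *	ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_CREATE, &create);
 *
 *	struct drm_syncobj_array array = {
 *		.handles = (__u64)(uintptr_t)&create.handle,
 *		.count_handles = 1,
 *	};
 *	// Install an already signaled stub fence ...
 *	ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_SIGNAL, &array);
 *	// ... and reset the fence back to NULL.
 *	ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_RESET, &array);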
 *
 *
 * Host-side wait on syncobjs
 * --------------------------
 *
 * &DRM_IOCTL_SYNCOBJ_WAIT takes an array of syncobj handles and does a
 * host-side wait on all of the syncobj fences simultaneously.
 * If &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL is set, the wait ioctl waits for all
 * of the syncobj fences to be signaled before it returns.
 * Otherwise, it returns once at least one syncobj fence has been signaled
 * and the index of a signaled fence is written back to the client.
 *
 * Unlike enqueued GPU work dependencies, which fail if they see a NULL
 * fence in a syncobj, if &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT is set,
 * the host-side wait will first wait for the syncobj to receive a non-NULL
 * fence and then wait on that fence.
 * If &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT is not set and any one of the
 * syncobjs in the array has a NULL fence, -EINVAL will be returned.
 * Assuming the syncobj starts off with a NULL fence, this allows a client
 * to do a host wait in one thread (or process) which waits on GPU work
 * submitted in another thread (or process) without having to manually
 * synchronize between the two.
 * This requirement is inherited from the Vulkan fence API.
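 *
 * A minimal sketch of such a host-side wait (again only illustrative;
 * ``handle`` and ``deadline_ns`` are assumed to exist in the caller)::
 *
 *	struct drm_syncobj_wait wait = {
 *		.handles = (__u64)(uintptr_t)&handle,
 *		.timeout_nsec = deadline_ns,  // absolute deadline
 *		.count_handles = 1,
 *		.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
 *	};
 *	ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);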
 *
 *
 * Import/export of syncobjs
 * -------------------------
 *
 * &DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE and &DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD
 * provide two mechanisms for import/export of syncobjs.
 *
 * The first lets the client import or export an entire syncobj to a file
 * descriptor.
 * These fds are opaque and have no other use case except passing the
 * syncobj between processes.
 * All exported file descriptors and any syncobj handles created as a
 * result of importing those file descriptors own a reference to the
 * same underlying struct &drm_syncobj and the syncobj can be used
 * persistently across all the processes with which it is shared.
 * The syncobj is freed only once the last reference is dropped.
 * Unlike dma-buf, importing a syncobj creates a new handle (with its own
 * reference) for every import instead of de-duplicating.
 * The primary use-case of this persistent import/export is for shared
 * Vulkan fences and semaphores.
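 *
 * For example (a sketch under the same assumptions as above), a handle is
 * exported to an opaque fd and re-imported in another process::
 *
 *	struct drm_syncobj_handle args = {
 *		.handle = handle,
 *		.flags = 0,
 *		.fd = -1,
 *	};
 *	ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
 *	// Pass args.fd over a unix socket, then in the receiver:
 *	ioctl(other_drm_fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args);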
 *
 * The second import/export mechanism, which is indicated by
 * &DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE or
 * &DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE, lets the client
 * import/export the syncobj's current fence from/to a &sync_file.
 * When a syncobj is exported to a sync file, that sync file wraps the
 * syncobj's fence at the time of export and any later signal or reset
 * operations on the syncobj will not affect the exported sync file.
 * When a sync file is imported into a syncobj, the syncobj's fence is set
 * to the fence wrapped by that sync file.
 * Because sync files are immutable, resetting or signaling the syncobj
 * will not affect any sync files whose fences have been imported into the
 * syncobj.
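 *
 * Exporting the current fence as a sync file uses the same
 * &struct drm_syncobj_handle, just with the sync-file flag set (again a
 * hedged sketch, not code from this file)::
 *
 *	struct drm_syncobj_handle args = {
 *		.handle = handle,
 *		.flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
 *		.fd = -1,
 *	};
 *	ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);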
 */

#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_print.h>
#include <drm/drm_syncobj.h>
#include <drm/drm_utils.h>

#include "drm_internal.h"
struct syncobj_wait_entry {
	struct list_head node;
	struct task_struct *task;
	struct dma_fence *fence;
	struct dma_fence_cb fence_cb;
	u64 point;
};

static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
				      struct syncobj_wait_entry *wait);

/**
 * drm_syncobj_find - lookup and reference a sync object.
 * @file_private: drm file private pointer
 * @handle: sync object handle to lookup.
 *
 * Returns a reference to the syncobj pointed to by handle or NULL. The
 * reference must be released by calling drm_syncobj_put().
 */
struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
				     u32 handle)
{
	struct drm_syncobj *syncobj;

	spin_lock(&file_private->syncobj_table_lock);

	/* Check if we currently have a reference on the object */
	syncobj = idr_find(&file_private->syncobj_idr, handle);
	if (syncobj)
		drm_syncobj_get(syncobj);

	spin_unlock(&file_private->syncobj_table_lock);

	return syncobj;
}
EXPORT_SYMBOL(drm_syncobj_find);
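
/*
 * Typical caller pattern (a sketch, not code from this file): a driver
 * ioctl that receives a syncobj handle pairs drm_syncobj_find() with
 * drm_syncobj_put() once it is done with the object:
 *
 *	struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
 *
 *	if (!syncobj)
 *		return -ENOENT;
 *	// ... use the syncobj ...
 *	drm_syncobj_put(syncobj);
 */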
static void drm_syncobj_fence_add_wait(struct drm_syncobj *syncobj,
				       struct syncobj_wait_entry *wait)
{
	struct dma_fence *fence;

	if (wait->fence)
		return;

	spin_lock(&syncobj->lock);
	/* We've already tried once to get a fence and failed.  Now that we
	 * have the lock, try one more time just to be sure we don't add a
	 * callback when a fence has already been set.
	 */
	fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1));
	if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
		dma_fence_put(fence);
		list_add_tail(&wait->node, &syncobj->cb_list);
	} else if (!fence) {
		wait->fence = dma_fence_get_stub();
	} else {
		wait->fence = fence;
	}
	spin_unlock(&syncobj->lock);
}

static void drm_syncobj_remove_wait(struct drm_syncobj *syncobj,
				    struct syncobj_wait_entry *wait)
{
	if (!wait->node.next)
		return;

	spin_lock(&syncobj->lock);
	list_del_init(&wait->node);
	spin_unlock(&syncobj->lock);
}
/**
 * drm_syncobj_add_point - add new timeline point to the syncobj
 * @syncobj: sync object to add the timeline point to
 * @chain: chain node to use to add the point
 * @fence: fence to encapsulate in the chain node
 * @point: sequence number to use for the point
 *
 * Add the chain node as new timeline point to the syncobj.
 */
void drm_syncobj_add_point(struct drm_syncobj *syncobj,
			   struct dma_fence_chain *chain,
			   struct dma_fence *fence,
			   uint64_t point)
{
	struct syncobj_wait_entry *cur, *tmp;
	struct dma_fence *prev;

	dma_fence_get(fence);

	spin_lock(&syncobj->lock);

	prev = drm_syncobj_fence_get(syncobj);
	/* Adding an unordered point to the timeline could cause the payload
	 * returned from query_ioctl to be 0!
	 */
	if (prev && prev->seqno >= point)
		DRM_ERROR("You are adding an unordered point to the timeline!\n");
	dma_fence_chain_init(chain, prev, fence, point);
	rcu_assign_pointer(syncobj->fence, &chain->base);

	list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
		syncobj_wait_syncobj_func(syncobj, cur);
	spin_unlock(&syncobj->lock);

	/* Walk the chain once to trigger garbage collection */
	dma_fence_chain_for_each(fence, prev);
	dma_fence_put(prev);
}
EXPORT_SYMBOL(drm_syncobj_add_point);
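
/*
 * Hedged usage sketch (cf. drm_syncobj_transfer_to_timeline() below): a
 * driver signaling timeline point `point` preallocates the chain node in a
 * context where allocation may fail, then hands it over together with the
 * job's fence; the node is owned by the timeline afterwards:
 *
 *	struct dma_fence_chain *chain = kzalloc(sizeof(*chain), GFP_KERNEL);
 *
 *	if (!chain)
 *		return -ENOMEM;
 *	drm_syncobj_add_point(syncobj, chain, job_fence, point);
 *	// Do not free the chain node here; the timeline owns it now.
 */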
/**
 * drm_syncobj_replace_fence - replace fence in a sync object.
 * @syncobj: Sync object to replace fence in
 * @fence: fence to install in the sync object.
 *
 * This replaces the fence on a sync object.
 */
void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
			       struct dma_fence *fence)
{
	struct dma_fence *old_fence;
	struct syncobj_wait_entry *cur, *tmp;

	if (fence)
		dma_fence_get(fence);

	spin_lock(&syncobj->lock);

	old_fence = rcu_dereference_protected(syncobj->fence,
					      lockdep_is_held(&syncobj->lock));
	rcu_assign_pointer(syncobj->fence, fence);

	if (fence != old_fence) {
		list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
			syncobj_wait_syncobj_func(syncobj, cur);
	}

	spin_unlock(&syncobj->lock);

	dma_fence_put(old_fence);
}
EXPORT_SYMBOL(drm_syncobj_replace_fence);

/**
 * drm_syncobj_assign_null_handle - assign a stub fence to the sync object
 * @syncobj: sync object to assign the fence on
 *
 * Assign an already signaled stub fence to the sync object.
 */
static void drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
{
	struct dma_fence *fence = dma_fence_get_stub();

	drm_syncobj_replace_fence(syncobj, fence);
	dma_fence_put(fence);
}

/* 5s default for wait submission */
#define DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT 5000000000ULL
/**
 * drm_syncobj_find_fence - lookup and reference the fence in a sync object
 * @file_private: drm file private pointer
 * @handle: sync object handle to lookup.
 * @point: timeline point
 * @flags: DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT or not
 * @fence: out parameter for the fence
 *
 * This is just a convenience function that combines drm_syncobj_find() and
 * drm_syncobj_fence_get().
 *
 * Returns 0 on success or a negative error value on failure. On success @fence
 * contains a reference to the fence, which must be released by calling
 * dma_fence_put().
 */
int drm_syncobj_find_fence(struct drm_file *file_private,
			   u32 handle, u64 point, u64 flags,
			   struct dma_fence **fence)
{
	struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
	struct syncobj_wait_entry wait;
	u64 timeout = nsecs_to_jiffies64(DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT);
	int ret;

	if (!syncobj)
		return -ENOENT;

	*fence = drm_syncobj_fence_get(syncobj);
	drm_syncobj_put(syncobj);

	if (*fence) {
		ret = dma_fence_chain_find_seqno(fence, point);
		if (!ret)
			return 0;
		dma_fence_put(*fence);
	} else {
		ret = -EINVAL;
	}

	if (!(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
		return ret;

	memset(&wait, 0, sizeof(wait));
	wait.task = current;
	wait.point = point;
	drm_syncobj_fence_add_wait(syncobj, &wait);

	do {
		set_current_state(TASK_INTERRUPTIBLE);
		if (wait.fence) {
			ret = 0;
			break;
		}
		if (timeout == 0) {
			ret = -ETIME;
			break;
		}

		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		timeout = schedule_timeout(timeout);
	} while (1);

	__set_current_state(TASK_RUNNING);
	*fence = wait.fence;

	drm_syncobj_remove_wait(syncobj, &wait);

	return ret;
}
EXPORT_SYMBOL(drm_syncobj_find_fence);
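
/*
 * Hedged sketch of a driver submission path resolving a binary syncobj
 * dependency (`in_handle` coming from userspace; names are illustrative):
 *
 *	struct dma_fence *in_fence;
 *	int ret;
 *
 *	ret = drm_syncobj_find_fence(file_private, in_handle, 0, 0, &in_fence);
 *	if (ret)
 *		return ret;
 *	// ... make the job wait on in_fence, then drop the reference ...
 *	dma_fence_put(in_fence);
 */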
/**
 * drm_syncobj_free - free a sync object.
 * @kref: kref to free.
 *
 * Only to be called from kref_put in drm_syncobj_put.
 */
void drm_syncobj_free(struct kref *kref)
{
	struct drm_syncobj *syncobj = container_of(kref,
						   struct drm_syncobj,
						   refcount);
	drm_syncobj_replace_fence(syncobj, NULL);
	kfree(syncobj);
}
EXPORT_SYMBOL(drm_syncobj_free);

/**
 * drm_syncobj_create - create a new syncobj
 * @out_syncobj: returned syncobj
 * @flags: DRM_SYNCOBJ_* flags
 * @fence: if non-NULL, the syncobj will represent this fence
 *
 * This is the first function to create a sync object. After creating, drivers
 * probably want to make it available to userspace, either through
 * drm_syncobj_get_handle() or drm_syncobj_get_fd().
 *
 * Returns 0 on success or a negative error value on failure.
 */
int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
		       struct dma_fence *fence)
{
	struct drm_syncobj *syncobj;

	syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL);
	if (!syncobj)
		return -ENOMEM;

	kref_init(&syncobj->refcount);
	INIT_LIST_HEAD(&syncobj->cb_list);
	spin_lock_init(&syncobj->lock);

	if (flags & DRM_SYNCOBJ_CREATE_SIGNALED)
		drm_syncobj_assign_null_handle(syncobj);

	if (fence)
		drm_syncobj_replace_fence(syncobj, fence);

	*out_syncobj = syncobj;
	return 0;
}
EXPORT_SYMBOL(drm_syncobj_create);
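
/*
 * Hedged sketch of the pattern described above (see also
 * drm_syncobj_create_as_handle() below for the fence == NULL case):
 *
 *	struct drm_syncobj *syncobj;
 *	int ret;
 *
 *	ret = drm_syncobj_create(&syncobj, 0, job_fence);
 *	if (ret)
 *		return ret;
 *	ret = drm_syncobj_get_handle(file_private, syncobj, &handle);
 *	drm_syncobj_put(syncobj);
 */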
/**
 * drm_syncobj_get_handle - get a handle from a syncobj
 * @file_private: drm file private pointer
 * @syncobj: Sync object to export
 * @handle: out parameter with the new handle
 *
 * Exports a sync object created with drm_syncobj_create() as a handle on
 * @file_private to userspace.
 *
 * Returns 0 on success or a negative error value on failure.
 */
int drm_syncobj_get_handle(struct drm_file *file_private,
			   struct drm_syncobj *syncobj, u32 *handle)
{
	int ret;

	/* take a reference to put in the idr */
	drm_syncobj_get(syncobj);

	idr_preload(GFP_KERNEL);
	spin_lock(&file_private->syncobj_table_lock);
	ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
	spin_unlock(&file_private->syncobj_table_lock);
	idr_preload_end();

	if (ret < 0) {
		drm_syncobj_put(syncobj);
		return ret;
	}

	*handle = ret;
	return 0;
}
EXPORT_SYMBOL(drm_syncobj_get_handle);

static int drm_syncobj_create_as_handle(struct drm_file *file_private,
					u32 *handle, uint32_t flags)
{
	int ret;
	struct drm_syncobj *syncobj;

	ret = drm_syncobj_create(&syncobj, flags, NULL);
	if (ret)
		return ret;

	ret = drm_syncobj_get_handle(file_private, syncobj, handle);
	drm_syncobj_put(syncobj);
	return ret;
}

static int drm_syncobj_destroy(struct drm_file *file_private,
			       u32 handle)
{
	struct drm_syncobj *syncobj;

	spin_lock(&file_private->syncobj_table_lock);
	syncobj = idr_remove(&file_private->syncobj_idr, handle);
	spin_unlock(&file_private->syncobj_table_lock);

	if (!syncobj)
		return -EINVAL;

	drm_syncobj_put(syncobj);
	return 0;
}
static int drm_syncobj_file_release(struct inode *inode, struct file *file)
{
	struct drm_syncobj *syncobj = file->private_data;

	drm_syncobj_put(syncobj);
	return 0;
}

static const struct file_operations drm_syncobj_file_fops = {
	.release = drm_syncobj_file_release,
};

/**
 * drm_syncobj_get_fd - get a file descriptor from a syncobj
 * @syncobj: Sync object to export
 * @p_fd: out parameter with the new file descriptor
 *
 * Exports a sync object created with drm_syncobj_create() as a file descriptor.
 *
 * Returns 0 on success or a negative error value on failure.
 */
int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
{
	struct file *file;
	int fd;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	file = anon_inode_getfile("syncobj_file",
				  &drm_syncobj_file_fops,
				  syncobj, 0);
	if (IS_ERR(file)) {
		put_unused_fd(fd);
		return PTR_ERR(file);
	}

	drm_syncobj_get(syncobj);
	fd_install(fd, file);

	*p_fd = fd;
	return 0;
}
EXPORT_SYMBOL(drm_syncobj_get_fd);

static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
				    u32 handle, int *p_fd)
{
	struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
	int ret;

	if (!syncobj)
		return -EINVAL;

	ret = drm_syncobj_get_fd(syncobj, p_fd);
	drm_syncobj_put(syncobj);
	return ret;
}

static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
				    int fd, u32 *handle)
{
	struct drm_syncobj *syncobj;
	struct fd f = fdget(fd);
	int ret;

	if (!f.file)
		return -EINVAL;

	if (f.file->f_op != &drm_syncobj_file_fops) {
		fdput(f);
		return -EINVAL;
	}

	/* take a reference to put in the idr */
	syncobj = f.file->private_data;
	drm_syncobj_get(syncobj);

	idr_preload(GFP_KERNEL);
	spin_lock(&file_private->syncobj_table_lock);
	ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
	spin_unlock(&file_private->syncobj_table_lock);
	idr_preload_end();

	if (ret > 0) {
		*handle = ret;
		ret = 0;
	} else {
		drm_syncobj_put(syncobj);
	}

	fdput(f);
	return ret;
}
static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
					      int fd, int handle)
{
	struct dma_fence *fence = sync_file_get_fence(fd);
	struct drm_syncobj *syncobj;

	if (!fence)
		return -EINVAL;

	syncobj = drm_syncobj_find(file_private, handle);
	if (!syncobj) {
		dma_fence_put(fence);
		return -ENOENT;
	}

	drm_syncobj_replace_fence(syncobj, fence);
	dma_fence_put(fence);
	drm_syncobj_put(syncobj);
	return 0;
}

static int drm_syncobj_export_sync_file(struct drm_file *file_private,
					int handle, int *p_fd)
{
	int ret;
	struct dma_fence *fence;
	struct sync_file *sync_file;
	int fd = get_unused_fd_flags(O_CLOEXEC);

	if (fd < 0)
		return fd;

	ret = drm_syncobj_find_fence(file_private, handle, 0, 0, &fence);
	if (ret)
		goto err_put_fd;

	sync_file = sync_file_create(fence);

	dma_fence_put(fence);

	if (!sync_file) {
		ret = -EINVAL;
		goto err_put_fd;
	}

	fd_install(fd, sync_file->file);

	*p_fd = fd;
	return 0;
err_put_fd:
	put_unused_fd(fd);
	return ret;
}
/**
 * drm_syncobj_open - initializes syncobj file-private structures at devnode open time
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of sync objects.
 */
void
drm_syncobj_open(struct drm_file *file_private)
{
	idr_init_base(&file_private->syncobj_idr, 1);
	spin_lock_init(&file_private->syncobj_table_lock);
}

static int
drm_syncobj_release_handle(int id, void *ptr, void *data)
{
	struct drm_syncobj *syncobj = ptr;

	drm_syncobj_put(syncobj);
	return 0;
}

/**
 * drm_syncobj_release - release file-private sync object resources
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_syncobj_release(struct drm_file *file_private)
{
	idr_for_each(&file_private->syncobj_idr,
		     &drm_syncobj_release_handle, file_private);
	idr_destroy(&file_private->syncobj_idr);
}
int
drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_private)
{
	struct drm_syncobj_create *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	/* no valid flags yet */
	if (args->flags & ~DRM_SYNCOBJ_CREATE_SIGNALED)
		return -EINVAL;

	return drm_syncobj_create_as_handle(file_private,
					    &args->handle, args->flags);
}

int
drm_syncobj_destroy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_private)
{
	struct drm_syncobj_destroy *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	/* make sure padding is empty */
	if (args->pad)
		return -EINVAL;

	return drm_syncobj_destroy(file_private, args->handle);
}

int
drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_private)
{
	struct drm_syncobj_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	if (args->pad)
		return -EINVAL;

	if (args->flags != 0 &&
	    args->flags != DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
		return -EINVAL;

	if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
		return drm_syncobj_export_sync_file(file_private, args->handle,
						    &args->fd);

	return drm_syncobj_handle_to_fd(file_private, args->handle,
					&args->fd);
}

int
drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_private)
{
	struct drm_syncobj_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	if (args->pad)
		return -EINVAL;

	if (args->flags != 0 &&
	    args->flags != DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
		return -EINVAL;

	if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
		return drm_syncobj_import_sync_file_fence(file_private,
							  args->fd,
							  args->handle);

	return drm_syncobj_fd_to_handle(file_private, args->fd,
					&args->handle);
}
static int drm_syncobj_transfer_to_timeline(struct drm_file *file_private,
					    struct drm_syncobj_transfer *args)
{
	struct drm_syncobj *timeline_syncobj = NULL;
	struct dma_fence *fence;
	struct dma_fence_chain *chain;
	int ret;

	timeline_syncobj = drm_syncobj_find(file_private, args->dst_handle);
	if (!timeline_syncobj)
		return -ENOENT;

	ret = drm_syncobj_find_fence(file_private, args->src_handle,
				     args->src_point, args->flags,
				     &fence);
	if (ret)
		goto err;
	chain = kzalloc(sizeof(struct dma_fence_chain), GFP_KERNEL);
	if (!chain) {
		ret = -ENOMEM;
		goto err1;
	}
	drm_syncobj_add_point(timeline_syncobj, chain, fence, args->dst_point);
err1:
	dma_fence_put(fence);
err:
	drm_syncobj_put(timeline_syncobj);

	return ret;
}

static int
drm_syncobj_transfer_to_binary(struct drm_file *file_private,
			       struct drm_syncobj_transfer *args)
{
	struct drm_syncobj *binary_syncobj = NULL;
	struct dma_fence *fence;
	int ret;

	binary_syncobj = drm_syncobj_find(file_private, args->dst_handle);
	if (!binary_syncobj)
		return -ENOENT;

	ret = drm_syncobj_find_fence(file_private, args->src_handle,
				     args->src_point, args->flags, &fence);
	if (ret)
		goto err;
	drm_syncobj_replace_fence(binary_syncobj, fence);
	dma_fence_put(fence);
err:
	drm_syncobj_put(binary_syncobj);

	return ret;
}

int
drm_syncobj_transfer_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_private)
{
	struct drm_syncobj_transfer *args = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
		return -EOPNOTSUPP;

	if (args->pad)
		return -EINVAL;

	if (args->dst_point)
		ret = drm_syncobj_transfer_to_timeline(file_private, args);
	else
		ret = drm_syncobj_transfer_to_binary(file_private, args);

	return ret;
}
static void syncobj_wait_fence_func(struct dma_fence *fence,
				    struct dma_fence_cb *cb)
{
	struct syncobj_wait_entry *wait =
		container_of(cb, struct syncobj_wait_entry, fence_cb);

	wake_up_process(wait->task);
}

static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
				      struct syncobj_wait_entry *wait)
{
	struct dma_fence *fence;

	/* This happens inside the syncobj lock */
	fence = rcu_dereference_protected(syncobj->fence,
					  lockdep_is_held(&syncobj->lock));
	dma_fence_get(fence);
	if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
		dma_fence_put(fence);
		return;
	} else if (!fence) {
		wait->fence = dma_fence_get_stub();
	} else {
		wait->fence = fence;
	}

	wake_up_process(wait->task);
	list_del_init(&wait->node);
}
static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
						  void __user *user_points,
						  uint32_t count,
						  uint32_t flags,
						  signed long timeout,
						  uint32_t *idx)
{
	struct syncobj_wait_entry *entries;
	struct dma_fence *fence;
	uint64_t *points;
	uint32_t signaled_count, i;

	points = kmalloc_array(count, sizeof(*points), GFP_KERNEL);
	if (points == NULL)
		return -ENOMEM;

	if (!user_points) {
		memset(points, 0, count * sizeof(uint64_t));
	} else if (copy_from_user(points, user_points,
				  sizeof(uint64_t) * count)) {
		timeout = -EFAULT;
		goto err_free_points;
	}

	entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
	if (!entries) {
		timeout = -ENOMEM;
		goto err_free_points;
	}
	/* Walk the list of sync objects and initialize entries.  We do
	 * this up-front so that we can properly return -EINVAL if there is
	 * a syncobj with a missing fence and then never have the chance of
	 * returning -EINVAL again.
	 */
	signaled_count = 0;
	for (i = 0; i < count; ++i) {
		struct dma_fence *fence;

		entries[i].task = current;
		entries[i].point = points[i];
		fence = drm_syncobj_fence_get(syncobjs[i]);
		if (!fence || dma_fence_chain_find_seqno(&fence, points[i])) {
			dma_fence_put(fence);
			if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
				continue;
			} else {
				timeout = -EINVAL;
				goto cleanup_entries;
			}
		}

		if (fence)
			entries[i].fence = fence;
		else
			entries[i].fence = dma_fence_get_stub();

		if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
		    dma_fence_is_signaled(entries[i].fence)) {
			if (signaled_count == 0 && idx)
				*idx = i;
			signaled_count++;
		}
	}

	if (signaled_count == count ||
	    (signaled_count > 0 &&
	     !(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)))
		goto cleanup_entries;

	/* There's a very annoying laxness in the dma_fence API here, in
	 * that backends are not required to automatically report when a
	 * fence is signaled prior to fence->ops->enable_signaling() being
	 * called.  So here if we fail to match signaled_count, we need to
	 * fall through and try a 0 timeout wait!
	 */

	if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
		for (i = 0; i < count; ++i)
			drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]);
	}

	do {
		set_current_state(TASK_INTERRUPTIBLE);

		signaled_count = 0;
		for (i = 0; i < count; ++i) {
			fence = entries[i].fence;
			if (!fence)
				continue;

			if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
			    dma_fence_is_signaled(fence) ||
			    (!entries[i].fence_cb.func &&
			     dma_fence_add_callback(fence,
						    &entries[i].fence_cb,
						    syncobj_wait_fence_func))) {
				/* The fence has been signaled */
				if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL) {
					signaled_count++;
				} else {
					if (idx)
						*idx = i;
					goto done_waiting;
				}
			}
		}

		if (signaled_count == count)
			goto done_waiting;

		if (timeout == 0) {
			timeout = -ETIME;
			goto done_waiting;
		}

		if (signal_pending(current)) {
			timeout = -ERESTARTSYS;
			goto done_waiting;
		}

		timeout = schedule_timeout(timeout);
	} while (1);

done_waiting:
	__set_current_state(TASK_RUNNING);

cleanup_entries:
	for (i = 0; i < count; ++i) {
		drm_syncobj_remove_wait(syncobjs[i], &entries[i]);
		if (entries[i].fence_cb.func)
			dma_fence_remove_callback(entries[i].fence,
						  &entries[i].fence_cb);
		dma_fence_put(entries[i].fence);
	}
	kfree(entries);

err_free_points:
	kfree(points);

	return timeout;
}
/**
 * drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute value
 *
 * @timeout_nsec: timeout nsec component in ns, 0 for poll
 *
 * Calculate the timeout in jiffies from an absolute time in sec/nsec.
 */
signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
{
	ktime_t abs_timeout, now;
	u64 timeout_ns, timeout_jiffies64;

	/* make 0 timeout means poll - absolute 0 doesn't seem valid */
	if (timeout_nsec == 0)
		return 0;

	abs_timeout = ns_to_ktime(timeout_nsec);
	now = ktime_get();

	if (!ktime_after(abs_timeout, now))
		return 0;

	timeout_ns = ktime_to_ns(ktime_sub(abs_timeout, now));

	timeout_jiffies64 = nsecs_to_jiffies64(timeout_ns);
	/* clamp timeout to avoid infinite timeout */
	if (timeout_jiffies64 >= MAX_SCHEDULE_TIMEOUT - 1)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies64 + 1;
}
EXPORT_SYMBOL(drm_timeout_abs_to_jiffies);
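
/*
 * Worked example (assuming CONFIG_HZ=250, i.e. 4 ms per jiffy): an absolute
 * deadline 100 ms in the future gives timeout_ns = 100000000, so
 * nsecs_to_jiffies64() yields 25 jiffies and the function returns 25 + 1.
 * The extra jiffy rounds up so the caller never waits less than requested
 * despite jiffy-granularity truncation.
 */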
static int drm_syncobj_array_wait(struct drm_device *dev,
				  struct drm_file *file_private,
				  struct drm_syncobj_wait *wait,
				  struct drm_syncobj_timeline_wait *timeline_wait,
				  struct drm_syncobj **syncobjs, bool timeline)
{
	signed long timeout = 0;
	uint32_t first = ~0;

	if (!timeline) {
		timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
		timeout = drm_syncobj_array_wait_timeout(syncobjs,
							 NULL,
							 wait->count_handles,
							 wait->flags,
							 timeout, &first);
		if (timeout < 0)
			return timeout;
		wait->first_signaled = first;
	} else {
		timeout = drm_timeout_abs_to_jiffies(timeline_wait->timeout_nsec);
		timeout = drm_syncobj_array_wait_timeout(syncobjs,
							 u64_to_user_ptr(timeline_wait->points),
							 timeline_wait->count_handles,
							 timeline_wait->flags,
							 timeout, &first);
		if (timeout < 0)
			return timeout;
		timeline_wait->first_signaled = first;
	}
	return 0;
}
static int drm_syncobj_array_find(struct drm_file *file_private,
				  void __user *user_handles,
				  uint32_t count_handles,
				  struct drm_syncobj ***syncobjs_out)
{
	uint32_t i, *handles;
	struct drm_syncobj **syncobjs;
	int ret;

	handles = kmalloc_array(count_handles, sizeof(*handles), GFP_KERNEL);
	if (handles == NULL)
		return -ENOMEM;

	if (copy_from_user(handles, user_handles,
			   sizeof(uint32_t) * count_handles)) {
		ret = -EFAULT;
		goto err_free_handles;
	}

	syncobjs = kmalloc_array(count_handles, sizeof(*syncobjs), GFP_KERNEL);
	if (syncobjs == NULL) {
		ret = -ENOMEM;
		goto err_free_handles;
	}

	for (i = 0; i < count_handles; i++) {
		syncobjs[i] = drm_syncobj_find(file_private, handles[i]);
		if (!syncobjs[i]) {
			ret = -ENOENT;
			goto err_put_syncobjs;
		}
	}

	kfree(handles);
	*syncobjs_out = syncobjs;
	return 0;

err_put_syncobjs:
	while (i-- > 0)
		drm_syncobj_put(syncobjs[i]);
	kfree(syncobjs);
err_free_handles:
	kfree(handles);

	return ret;
}

static void drm_syncobj_array_free(struct drm_syncobj **syncobjs,
				   uint32_t count)
{
	uint32_t i;

	for (i = 0; i < count; i++)
		drm_syncobj_put(syncobjs[i]);
	kfree(syncobjs);
}
int
drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_private)
{
	struct drm_syncobj_wait *args = data;
	struct drm_syncobj **syncobjs;
	int ret = 0;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
			    DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	ret = drm_syncobj_array_wait(dev, file_private,
				     args, NULL, syncobjs, false);

	drm_syncobj_array_free(syncobjs, args->count_handles);

	return ret;
}

int
drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_private)
{
	struct drm_syncobj_timeline_wait *args = data;
	struct drm_syncobj **syncobjs;
	int ret = 0;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
		return -EOPNOTSUPP;

	if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
			    DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
			    DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE))
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	ret = drm_syncobj_array_wait(dev, file_private,
				     NULL, args, syncobjs, true);

	drm_syncobj_array_free(syncobjs, args->count_handles);

	return ret;
}
int
drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_private)
{
	struct drm_syncobj_array *args = data;
	struct drm_syncobj **syncobjs;
	uint32_t i;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	if (args->pad != 0)
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	for (i = 0; i < args->count_handles; i++)
		drm_syncobj_replace_fence(syncobjs[i], NULL);

	drm_syncobj_array_free(syncobjs, args->count_handles);

	return 0;
}

int
drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_private)
{
	struct drm_syncobj_array *args = data;
	struct drm_syncobj **syncobjs;
	uint32_t i;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	if (args->pad != 0)
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	for (i = 0; i < args->count_handles; i++)
		drm_syncobj_assign_null_handle(syncobjs[i]);

	drm_syncobj_array_free(syncobjs, args->count_handles);

	return ret;
}
int
drm_syncobj_timeline_signal_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_private)
{
	struct drm_syncobj_timeline_array *args = data;
	struct drm_syncobj **syncobjs;
	struct dma_fence_chain **chains;
	uint64_t *points;
	uint32_t i, j;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
		return -EOPNOTSUPP;

	if (args->flags != 0)
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	points = kmalloc_array(args->count_handles, sizeof(*points),
			       GFP_KERNEL);
	if (!points) {
		ret = -ENOMEM;
		goto out;
	}
	if (!u64_to_user_ptr(args->points)) {
		memset(points, 0, args->count_handles * sizeof(uint64_t));
	} else if (copy_from_user(points, u64_to_user_ptr(args->points),
				  sizeof(uint64_t) * args->count_handles)) {
		ret = -EFAULT;
		goto err_points;
	}

	chains = kmalloc_array(args->count_handles, sizeof(void *), GFP_KERNEL);
	if (!chains) {
		ret = -ENOMEM;
		goto err_points;
	}
	for (i = 0; i < args->count_handles; i++) {
		chains[i] = kzalloc(sizeof(struct dma_fence_chain), GFP_KERNEL);
		if (!chains[i]) {
			for (j = 0; j < i; j++)
				kfree(chains[j]);
			ret = -ENOMEM;
			goto err_chains;
		}
	}

	for (i = 0; i < args->count_handles; i++) {
		struct dma_fence *fence = dma_fence_get_stub();

		drm_syncobj_add_point(syncobjs[i], chains[i],
				      fence, points[i]);
		dma_fence_put(fence);
	}
err_chains:
	kfree(chains);
err_points:
	kfree(points);
out:
	drm_syncobj_array_free(syncobjs, args->count_handles);

	return ret;
}
int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_private)
{
	struct drm_syncobj_timeline_array *args = data;
	struct drm_syncobj **syncobjs;
	uint64_t __user *points = u64_to_user_ptr(args->points);
	uint32_t i;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
		return -EOPNOTSUPP;

	if (args->flags & ~DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED)
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	for (i = 0; i < args->count_handles; i++) {
		struct dma_fence_chain *chain;
		struct dma_fence *fence;
		uint64_t point;

		fence = drm_syncobj_fence_get(syncobjs[i]);
		chain = to_dma_fence_chain(fence);
		if (chain) {
			struct dma_fence *iter, *last_signaled =
				dma_fence_get(fence);

			if (args->flags &
			    DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED) {
				point = fence->seqno;
			} else {
				dma_fence_chain_for_each(iter, fence) {
					if (iter->context != fence->context) {
						dma_fence_put(iter);
						/* It is most likely that the
						 * timeline has unordered
						 * points.
						 */
						break;
					}
					dma_fence_put(last_signaled);
					last_signaled = dma_fence_get(iter);
				}
				point = dma_fence_is_signaled(last_signaled) ?
					last_signaled->seqno :
					to_dma_fence_chain(last_signaled)->prev_seqno;
			}
			dma_fence_put(last_signaled);
		} else {
			point = 0;
		}
		dma_fence_put(fence);
		ret = copy_to_user(&points[i], &point, sizeof(uint64_t));
		ret = ret ? -EFAULT : 0;
		if (ret)
			break;
	}
	drm_syncobj_array_free(syncobjs, args->count_handles);

	return ret;
}