/*
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/debugfs.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/sync.h>
#include <linux/uaccess.h>

#include <linux/anon_inodes.h>
static void sync_fence_signal_pt(struct sync_pt *pt);
static int _sync_pt_has_signaled(struct sync_pt *pt);

static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);

static LIST_HEAD(sync_fence_list_head);
static DEFINE_SPINLOCK(sync_fence_list_lock);
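
/*
 * Create a new sync_timeline.  @size must be at least
 * sizeof(struct sync_timeline) so a driver can embed the timeline at
 * the start of a larger, driver-private structure.
 */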
struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
                                           int size, const char *name)
{
        struct sync_timeline *obj;
        unsigned long flags;

        if (size < sizeof(struct sync_timeline))
                return NULL;

        obj = kzalloc(size, GFP_KERNEL);
        if (obj == NULL)
                return NULL;

        obj->ops = ops;
        strlcpy(obj->name, name, sizeof(obj->name));

        INIT_LIST_HEAD(&obj->child_list_head);
        spin_lock_init(&obj->child_list_lock);

        INIT_LIST_HEAD(&obj->active_list_head);
        spin_lock_init(&obj->active_list_lock);

        spin_lock_irqsave(&sync_timeline_list_lock, flags);
        list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
        spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

        return obj;
}
static void sync_timeline_free(struct sync_timeline *obj)
{
        unsigned long flags;

        if (obj->ops->release_obj)
                obj->ops->release_obj(obj);

        spin_lock_irqsave(&sync_timeline_list_lock, flags);
        list_del(&obj->sync_timeline_list);
        spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

        kfree(obj);
}
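
/*
 * Mark the timeline destroyed.  If no sync_pts remain it is freed
 * immediately; otherwise it is signaled one last time so the remaining
 * points can retire, and the final sync_timeline_remove_pt() performs
 * the free.
 */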
void sync_timeline_destroy(struct sync_timeline *obj)
{
        unsigned long flags;
        bool needs_freeing;

        spin_lock_irqsave(&obj->child_list_lock, flags);
        obj->destroyed = true;
        needs_freeing = list_empty(&obj->child_list_head);
        spin_unlock_irqrestore(&obj->child_list_lock, flags);

        if (needs_freeing)
                sync_timeline_free(obj);
        else
                sync_timeline_signal(obj);
}
static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
{
        unsigned long flags;

        pt->parent = obj;

        spin_lock_irqsave(&obj->child_list_lock, flags);
        list_add_tail(&pt->child_list, &obj->child_list_head);
        spin_unlock_irqrestore(&obj->child_list_lock, flags);
}
static void sync_timeline_remove_pt(struct sync_pt *pt)
{
        struct sync_timeline *obj = pt->parent;
        unsigned long flags;
        bool needs_freeing;

        spin_lock_irqsave(&obj->active_list_lock, flags);
        if (!list_empty(&pt->active_list))
                list_del_init(&pt->active_list);
        spin_unlock_irqrestore(&obj->active_list_lock, flags);

        spin_lock_irqsave(&obj->child_list_lock, flags);
        list_del(&pt->child_list);
        needs_freeing = obj->destroyed && list_empty(&obj->child_list_head);
        spin_unlock_irqrestore(&obj->child_list_lock, flags);

        if (needs_freeing)
                sync_timeline_free(obj);
}
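
/*
 * Move every pt that now reports signaled off the active list while
 * holding active_list_lock, then notify the affected fences with the
 * lock dropped.
 */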
void sync_timeline_signal(struct sync_timeline *obj)
{
        unsigned long flags;
        LIST_HEAD(signaled_pts);
        struct list_head *pos, *n;

        spin_lock_irqsave(&obj->active_list_lock, flags);

        list_for_each_safe(pos, n, &obj->active_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, active_list);

                if (_sync_pt_has_signaled(pt))
                        list_move(pos, &signaled_pts);
        }

        spin_unlock_irqrestore(&obj->active_list_lock, flags);

        list_for_each_safe(pos, n, &signaled_pts) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, active_list);

                list_del_init(pos);
                sync_fence_signal_pt(pt);
        }
}
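
/*
 * As with sync_timeline_create(), @size may exceed
 * sizeof(struct sync_pt) so the driver can append private state.
 */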
struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
{
        struct sync_pt *pt;

        if (size < sizeof(struct sync_pt))
                return NULL;

        pt = kzalloc(size, GFP_KERNEL);
        if (pt == NULL)
                return NULL;

        INIT_LIST_HEAD(&pt->active_list);
        sync_timeline_add_pt(parent, pt);

        return pt;
}
void sync_pt_free(struct sync_pt *pt)
{
        if (pt->parent->ops->free_pt)
                pt->parent->ops->free_pt(pt);

        sync_timeline_remove_pt(pt);

        kfree(pt);
}
/* call with pt->parent->active_list_lock held */
static int _sync_pt_has_signaled(struct sync_pt *pt)
{
        int old_status = pt->status;

        if (!pt->status)
                pt->status = pt->parent->ops->has_signaled(pt);

        if (!pt->status && pt->parent->destroyed)
                pt->status = -ENOENT;

        if (pt->status != old_status)
                pt->timestamp = ktime_get();

        return pt->status;
}
static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
{
        return pt->parent->ops->dup(pt);
}
/* Adds a sync pt to the active queue.  Called when added to a fence */
static void sync_pt_activate(struct sync_pt *pt)
{
        struct sync_timeline *obj = pt->parent;
        unsigned long flags;
        int err;

        spin_lock_irqsave(&obj->active_list_lock, flags);

        err = _sync_pt_has_signaled(pt);
        if (err != 0)
                goto out;

        list_add_tail(&pt->active_list, &obj->active_list_head);

out:
        spin_unlock_irqrestore(&obj->active_list_lock, flags);
}
static int sync_fence_release(struct inode *inode, struct file *file);
static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
                             unsigned long arg);

static const struct file_operations sync_fence_fops = {
        .release = sync_fence_release,
        .poll = sync_fence_poll,
        .unlocked_ioctl = sync_fence_ioctl,
};
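
/*
 * Allocate a fence and back it with an anonymous inode file so it can
 * be handed to userspace (and across processes) as a file descriptor.
 */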
static struct sync_fence *sync_fence_alloc(const char *name)
{
        struct sync_fence *fence;
        unsigned long flags;

        fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
        if (fence == NULL)
                return NULL;

        fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
                                         fence, 0);
        if (fence->file == NULL)
                goto err;

        strlcpy(fence->name, name, sizeof(fence->name));

        INIT_LIST_HEAD(&fence->pt_list_head);
        INIT_LIST_HEAD(&fence->waiter_list_head);
        spin_lock_init(&fence->waiter_list_lock);

        init_waitqueue_head(&fence->wq);

        spin_lock_irqsave(&sync_fence_list_lock, flags);
        list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
        spin_unlock_irqrestore(&sync_fence_list_lock, flags);

        return fence;

err:
        kfree(fence);
        return NULL;
}
/* TODO: implement a create which takes more than one sync_pt */
struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
{
        struct sync_fence *fence;

        if (pt->fence)
                return NULL;

        fence = sync_fence_alloc(name);
        if (fence == NULL)
                return NULL;

        pt->fence = fence;
        list_add(&pt->pt_list, &fence->pt_list_head);
        sync_pt_activate(pt);

        return fence;
}
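
/*
 * Duplicate every pt in @src into @dst via the driver's dup() op and
 * activate the copies on their timelines.
 */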
static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
{
        struct list_head *pos;

        list_for_each(pos, &src->pt_list_head) {
                struct sync_pt *orig_pt =
                        container_of(pos, struct sync_pt, pt_list);
                struct sync_pt *new_pt = sync_pt_dup(orig_pt);

                if (new_pt == NULL)
                        return -ENOMEM;

                new_pt->fence = dst;
                list_add(&new_pt->pt_list, &dst->pt_list_head);
                sync_pt_activate(new_pt);
        }

        return 0;
}
static void sync_fence_free_pts(struct sync_fence *fence)
{
        struct list_head *pos, *n;

        list_for_each_safe(pos, n, &fence->pt_list_head) {
                struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
                sync_pt_free(pt);
        }
}
struct sync_fence *sync_fence_fdget(int fd)
{
        struct file *file = fget(fd);

        if (file == NULL)
                return NULL;

        if (file->f_op != &sync_fence_fops)
                goto err;

        return file->private_data;

err:
        fput(file);
        return NULL;
}
void sync_fence_put(struct sync_fence *fence)
{
        fput(fence->file);
}
void sync_fence_install(struct sync_fence *fence, int fd)
{
        fd_install(fd, fence->file);
}
static int sync_fence_get_status(struct sync_fence *fence)
{
        struct list_head *pos;
        int status = 1;

        list_for_each(pos, &fence->pt_list_head) {
                struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
                int pt_status = pt->status;

                if (pt_status < 0) {
                        status = pt_status;
                        break;
                } else if (status == 1) {
                        status = pt_status;
                }
        }

        return status;
}
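
/*
 * Merge @a and @b into a new fence containing copies of all their pts.
 * The new fence's initial status is computed from the copied points.
 */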
struct sync_fence *sync_fence_merge(const char *name,
                                    struct sync_fence *a, struct sync_fence *b)
{
        struct sync_fence *fence;
        int err;

        fence = sync_fence_alloc(name);
        if (fence == NULL)
                return NULL;

        err = sync_fence_copy_pts(fence, a);
        if (err < 0)
                goto err;

        err = sync_fence_copy_pts(fence, b);
        if (err < 0)
                goto err;

        fence->status = sync_fence_get_status(fence);

        return fence;
err:
        sync_fence_free_pts(fence);
        kfree(fence);
        return NULL;
}
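
/*
 * Called when a pt signals.  The waiter list is spliced to a private
 * list under waiter_list_lock on the fence's unsignaled -> signaled
 * transition, so each callback runs exactly once, without the lock held.
 */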
static void sync_fence_signal_pt(struct sync_pt *pt)
{
        LIST_HEAD(signaled_waiters);
        struct sync_fence *fence = pt->fence;
        struct list_head *pos;
        struct list_head *n;
        unsigned long flags;
        int status;

        status = sync_fence_get_status(fence);

        spin_lock_irqsave(&fence->waiter_list_lock, flags);
        /*
         * this should protect against two threads racing on the signaled
         * false -> true transition
         */
        if (status && !fence->status) {
                list_for_each_safe(pos, n, &fence->waiter_list_head)
                        list_move(pos, &signaled_waiters);

                fence->status = status;
        } else {
                status = 0;
        }
        spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

        if (status) {
                list_for_each_safe(pos, n, &signaled_waiters) {
                        struct sync_fence_waiter *waiter =
                                container_of(pos, struct sync_fence_waiter,
                                             waiter_list);

                        waiter->callback(fence, waiter->callback_data);
                        list_del(pos);
                        kfree(waiter);
                }
                wake_up(&fence->wq);
        }
}
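
/*
 * Register @callback to run when the fence signals.  Returns 0 if the
 * waiter was queued, or the current fence status if the fence has
 * already signaled (or errored), in which case the callback is not
 * invoked.
 */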
int sync_fence_wait_async(struct sync_fence *fence,
                          void (*callback)(struct sync_fence *, void *data),
                          void *callback_data)
{
        struct sync_fence_waiter *waiter;
        unsigned long flags;
        int err = 0;

        waiter = kzalloc(sizeof(struct sync_fence_waiter), GFP_KERNEL);
        if (waiter == NULL)
                return -ENOMEM;

        waiter->callback = callback;
        waiter->callback_data = callback_data;

        spin_lock_irqsave(&fence->waiter_list_lock, flags);

        if (fence->status) {
                kfree(waiter);
                err = fence->status;
                goto out;
        }

        list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
out:
        spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

        return err;
}
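
/*
 * Block until the fence signals.  @timeout is in milliseconds; zero
 * waits indefinitely.  Returns -ETIME on timeout, the fence's error
 * status if it signaled with an error, or 0 on success.
 */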
int sync_fence_wait(struct sync_fence *fence, long timeout)
{
        int err;

        if (timeout) {
                timeout = msecs_to_jiffies(timeout);
                err = wait_event_interruptible_timeout(fence->wq,
                                                       fence->status != 0,
                                                       timeout);
        } else {
                err = wait_event_interruptible(fence->wq, fence->status != 0);
        }

        if (err < 0)
                return err;

        if (fence->status < 0)
                return fence->status;

        if (fence->status == 0)
                return -ETIME;

        return 0;
}
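
/* fops->release: the last file reference is gone, tear the fence down */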
static int sync_fence_release(struct inode *inode, struct file *file)
{
        struct sync_fence *fence = file->private_data;
        unsigned long flags;

        sync_fence_free_pts(fence);

        spin_lock_irqsave(&sync_fence_list_lock, flags);
        list_del(&fence->sync_fence_list);
        spin_unlock_irqrestore(&sync_fence_list_lock, flags);

        kfree(fence);

        return 0;
}
static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
{
        struct sync_fence *fence = file->private_data;

        poll_wait(file, &fence->wq, wait);

        if (fence->status == 1)
                return POLLIN;
        else if (fence->status < 0)
                return POLLERR;
        else
                return 0;
}
static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
{
        __u32 value;

        if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
                return -EFAULT;

        return sync_fence_wait(fence, value);
}
static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
{
        int fd = get_unused_fd();
        int err;
        struct sync_fence *fence2, *fence3;
        struct sync_merge_data data;

        if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
                return -EFAULT;

        fence2 = sync_fence_fdget(data.fd2);
        if (fence2 == NULL) {
                err = -ENOENT;
                goto err_put_fd;
        }

        data.name[sizeof(data.name) - 1] = '\0';
        fence3 = sync_fence_merge(data.name, fence, fence2);
        if (fence3 == NULL) {
                err = -ENOMEM;
                goto err_put_fence2;
        }

        data.fence = fd;
        if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
                err = -EFAULT;
                goto err_put_fence3;
        }

        sync_fence_install(fence3, fd);
        sync_fence_put(fence2);
        return 0;

err_put_fence3:
        sync_fence_put(fence3);

err_put_fence2:
        sync_fence_put(fence2);

err_put_fd:
        put_unused_fd(fd);
        return err;
}
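
/*
 * Serialize one pt into the sync_pt_info record at @data.  Returns the
 * number of bytes written (info->len), or a negative errno if @size is
 * too small or the driver's fill_driver_data() fails.
 */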
int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
{
        struct sync_pt_info *info = data;
        int ret;

        if (size < sizeof(struct sync_pt_info))
                return -ENOMEM;

        info->len = sizeof(struct sync_pt_info);

        if (pt->parent->ops->fill_driver_data) {
                ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
                                                        size - sizeof(*info));
                if (ret < 0)
                        return ret;

                info->len += ret;
        }

        strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
        strlcpy(info->driver_name, pt->parent->ops->driver_name,
                sizeof(info->driver_name));
        info->status = pt->status;
        info->timestamp_ns = ktime_to_ns(pt->timestamp);

        return info->len;
}
static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
                                        unsigned long arg)
{
        struct sync_fence_info_data *data;
        struct list_head *pos;
        __u32 size;
        __u32 len = 0;
        int ret;

        if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
                return -EFAULT;

        if (size < sizeof(struct sync_fence_info_data))
                return -EINVAL;

        if (size > 4096)
                size = 4096;

        data = kzalloc(size, GFP_KERNEL);
        if (data == NULL)
                return -ENOMEM;

        strlcpy(data->name, fence->name, sizeof(data->name));
        data->status = fence->status;
        len = sizeof(struct sync_fence_info_data);

        list_for_each(pos, &fence->pt_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, pt_list);

                ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);

                if (ret < 0)
                        goto out;

                len += ret;
        }

        data->len = len;

        if (copy_to_user((void __user *)arg, data, len))
                ret = -EFAULT;
        else
                ret = 0;

out:
        kfree(data);

        return ret;
}
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
                             unsigned long arg)
{
        struct sync_fence *fence = file->private_data;
        switch (cmd) {
        case SYNC_IOC_WAIT:
                return sync_fence_ioctl_wait(fence, arg);

        case SYNC_IOC_MERGE:
                return sync_fence_ioctl_merge(fence, arg);

        case SYNC_IOC_FENCE_INFO:
                return sync_fence_ioctl_fence_info(fence, arg);

        default:
                return -ENOTTY;
        }
}
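
/*
 * The remainder of the file implements the "sync" debugfs file, which
 * dumps every live timeline and fence for debugging.
 */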
#ifdef CONFIG_DEBUG_FS
static const char *sync_status_str(int status)
{
        if (status > 0)
                return "signaled";
        else if (status == 0)
                return "active";
        else
                return "error";
}
static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
{
        int status = pt->status;
        seq_printf(s, "  %s%spt %s",
                   fence ? pt->parent->name : "",
                   fence ? "_" : "",
                   sync_status_str(status));
        if (pt->status) {
                struct timeval tv = ktime_to_timeval(pt->timestamp);
                seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
        }

        if (pt->parent->ops->print_pt) {
                seq_printf(s, ": ");
                pt->parent->ops->print_pt(s, pt);
        }

        seq_printf(s, "\n");
}
static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
{
        struct list_head *pos;
        unsigned long flags;

        seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);

        if (obj->ops->print_obj) {
                seq_printf(s, ": ");
                obj->ops->print_obj(s, obj);
        }

        seq_printf(s, "\n");

        spin_lock_irqsave(&obj->child_list_lock, flags);
        list_for_each(pos, &obj->child_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, child_list);
                sync_print_pt(s, pt, false);
        }
        spin_unlock_irqrestore(&obj->child_list_lock, flags);
}
static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
{
        struct list_head *pos;
        unsigned long flags;

        seq_printf(s, "%s: %s\n", fence->name, sync_status_str(fence->status));

        list_for_each(pos, &fence->pt_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, pt_list);
                sync_print_pt(s, pt, true);
        }

        spin_lock_irqsave(&fence->waiter_list_lock, flags);
        list_for_each(pos, &fence->waiter_list_head) {
                struct sync_fence_waiter *waiter =
                        container_of(pos, struct sync_fence_waiter,
                                     waiter_list);

                seq_printf(s, "waiter %pF %p\n", waiter->callback,
                           waiter->callback_data);
        }
        spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
}
static int sync_debugfs_show(struct seq_file *s, void *unused)
{
        unsigned long flags;
        struct list_head *pos;

        seq_printf(s, "objs:\n--------------\n");

        spin_lock_irqsave(&sync_timeline_list_lock, flags);
        list_for_each(pos, &sync_timeline_list_head) {
                struct sync_timeline *obj =
                        container_of(pos, struct sync_timeline,
                                     sync_timeline_list);

                sync_print_obj(s, obj);
                seq_printf(s, "\n");
        }
        spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

        seq_printf(s, "fences:\n--------------\n");

        spin_lock_irqsave(&sync_fence_list_lock, flags);
        list_for_each(pos, &sync_fence_list_head) {
                struct sync_fence *fence =
                        container_of(pos, struct sync_fence, sync_fence_list);

                sync_print_fence(s, fence);
                seq_printf(s, "\n");
        }
        spin_unlock_irqrestore(&sync_fence_list_lock, flags);
        return 0;
}
static int sync_debugfs_open(struct inode *inode, struct file *file)
{
        return single_open(file, sync_debugfs_show, inode->i_private);
}
static const struct file_operations sync_debugfs_fops = {
        .open           = sync_debugfs_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
static __init int sync_debugfs_init(void)
{
        debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
        return 0;
}
late_initcall(sync_debugfs_init);
#endif