/*
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/anon_inodes.h>

#include "sync.h"

#define CREATE_TRACE_POINTS
#include "trace/sync.h"
static const struct fence_ops android_fence_ops;
static const struct file_operations sync_fence_fops;
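/*
 * A sync_timeline owns the monotonically increasing counter that its
 * sync_pts are compared against.  Drivers embed it in a larger,
 * driver-specific structure, which is why creation takes a size.
 */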
struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
					   int size, const char *name)
{
	struct sync_timeline *obj;

	if (size < sizeof(struct sync_timeline))
		return NULL;

	obj = kzalloc(size, GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	kref_init(&obj->kref);
	obj->ops = ops;
	obj->context = fence_context_alloc(1);
	strlcpy(obj->name, name, sizeof(obj->name));

	INIT_LIST_HEAD(&obj->child_list_head);
	INIT_LIST_HEAD(&obj->active_list_head);
	spin_lock_init(&obj->child_list_lock);

	sync_timeline_debug_add(obj);

	return obj;
}
EXPORT_SYMBOL(sync_timeline_create);
static void sync_timeline_free(struct kref *kref)
{
	struct sync_timeline *obj =
		container_of(kref, struct sync_timeline, kref);

	sync_timeline_debug_remove(obj);

	if (obj->ops->release_obj)
		obj->ops->release_obj(obj);

	kfree(obj);
}
static void sync_timeline_get(struct sync_timeline *obj)
{
	kref_get(&obj->kref);
}

static void sync_timeline_put(struct sync_timeline *obj)
{
	kref_put(&obj->kref, sync_timeline_free);
}
void sync_timeline_destroy(struct sync_timeline *obj)
{
	obj->destroyed = true;
	/*
	 * Ensure timeline is marked as destroyed before
	 * changing timeline's fences status.
	 */
	smp_wmb();

	/*
	 * signal any children that their parent is going away.
	 */
	sync_timeline_signal(obj);
	sync_timeline_put(obj);
}
EXPORT_SYMBOL(sync_timeline_destroy);
void sync_timeline_signal(struct sync_timeline *obj)
{
	unsigned long flags;
	LIST_HEAD(signaled_pts);
	struct sync_pt *pt, *next;

	trace_sync_timeline(obj);

	spin_lock_irqsave(&obj->child_list_lock, flags);

	list_for_each_entry_safe(pt, next, &obj->active_list_head,
				 active_list) {
		if (fence_is_signaled_locked(&pt->base))
			list_del_init(&pt->active_list);
	}

	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}
EXPORT_SYMBOL(sync_timeline_signal);
struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size)
{
	unsigned long flags;
	struct sync_pt *pt;

	if (size < sizeof(struct sync_pt))
		return NULL;

	pt = kzalloc(size, GFP_KERNEL);
	if (pt == NULL)
		return NULL;

	spin_lock_irqsave(&obj->child_list_lock, flags);
	sync_timeline_get(obj);
	fence_init(&pt->base, &android_fence_ops, &obj->child_list_lock,
		   obj->context, ++obj->value);
	list_add_tail(&pt->child_list, &obj->child_list_head);
	INIT_LIST_HEAD(&pt->active_list);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
	return pt;
}
EXPORT_SYMBOL(sync_pt_create);
void sync_pt_free(struct sync_pt *pt)
{
	fence_put(&pt->base);
}
EXPORT_SYMBOL(sync_pt_free);
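/*
 * A sync_fence is refcounted two ways: the file reference backs the fd
 * handed to userspace, while the kref covers kernel-internal users.
 * sync_fence_alloc() sets up both, along with the anonymous inode file
 * that exposes the fence to userspace.
 */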
static struct sync_fence *sync_fence_alloc(int size, const char *name)
{
	struct sync_fence *fence;

	fence = kzalloc(size, GFP_KERNEL);
	if (fence == NULL)
		return NULL;

	fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
					 fence, 0);
	if (IS_ERR(fence->file))
		goto err;

	kref_init(&fence->kref);
	strlcpy(fence->name, name, sizeof(fence->name));

	init_waitqueue_head(&fence->wq);

	return fence;

err:
	kfree(fence);
	return NULL;
}
static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
{
	struct sync_fence_cb *check;
	struct sync_fence *fence;

	check = container_of(cb, struct sync_fence_cb, cb);
	fence = check->fence;

	/* The last callback to fire drops status to 0 and wakes waiters. */
	if (atomic_dec_and_test(&fence->status))
		wake_up_all(&fence->wq);
}
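/*
 * A sketch of typical driver usage (the timeline "obj" and its
 * sync_timeline_ops are driver-defined; error handling is omitted):
 *
 *	struct sync_pt *pt = sync_pt_create(obj, sizeof(*pt));
 *	struct sync_fence *fence = sync_fence_create("mydriver", pt);
 *	int fd = get_unused_fd_flags(O_CLOEXEC);
 *
 *	sync_fence_install(fence, fd);
 */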
/* TODO: implement a create which takes more than one sync_pt */
struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
{
	struct sync_fence *fence;

	fence = sync_fence_alloc(offsetof(struct sync_fence, cbs[1]), name);
	if (fence == NULL)
		return NULL;

	fence->num_fences = 1;
	atomic_set(&fence->status, 1);

	fence->cbs[0].sync_pt = &pt->base;
	fence->cbs[0].fence = fence;
	if (fence_add_callback(&pt->base, &fence->cbs[0].cb,
			       fence_check_cb_func))
		atomic_dec(&fence->status);

	sync_fence_debug_add(fence);

	return fence;
}
EXPORT_SYMBOL(sync_fence_create);
struct sync_fence *sync_fence_fdget(int fd)
{
	struct file *file = fget(fd);

	if (file == NULL)
		return NULL;

	if (file->f_op != &sync_fence_fops)
		goto err;

	return file->private_data;

err:
	fput(file);
	return NULL;
}
EXPORT_SYMBOL(sync_fence_fdget);
void sync_fence_put(struct sync_fence *fence)
{
	fput(fence->file);
}
EXPORT_SYMBOL(sync_fence_put);
void sync_fence_install(struct sync_fence *fence, int fd)
{
	fd_install(fd, fence->file);
}
EXPORT_SYMBOL(sync_fence_install);
static void sync_fence_add_pt(struct sync_fence *fence,
			      int *i, struct fence *pt)
{
	fence->cbs[*i].sync_pt = pt;
	fence->cbs[*i].fence = fence;

	if (!fence_add_callback(pt, &fence->cbs[*i].cb, fence_check_cb_func)) {
		fence_get(pt);
		(*i)++;
	}
}
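/*
 * sync_fence_merge() builds a fence that signals once every point in
 * both input fences has signaled.  Each input keeps its points sorted
 * by fence context, so a single merge pass suffices; when both inputs
 * carry a point on the same context, the later seqno (compared with
 * wraparound in mind) is kept.
 */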
struct sync_fence *sync_fence_merge(const char *name,
				    struct sync_fence *a, struct sync_fence *b)
{
	int num_fences = a->num_fences + b->num_fences;
	struct sync_fence *fence;
	int i, i_a, i_b;
	unsigned long size = offsetof(struct sync_fence, cbs[num_fences]);

	fence = sync_fence_alloc(size, name);
	if (fence == NULL)
		return NULL;

	atomic_set(&fence->status, num_fences);

	/*
	 * Assume sync_fence a and b are both ordered and have no
	 * duplicates with the same context.
	 *
	 * If a sync_fence can only be created with sync_fence_merge
	 * and sync_fence_create, this is a reasonable assumption.
	 */
	for (i = i_a = i_b = 0; i_a < a->num_fences && i_b < b->num_fences; ) {
		struct fence *pt_a = a->cbs[i_a].sync_pt;
		struct fence *pt_b = b->cbs[i_b].sync_pt;

		if (pt_a->context < pt_b->context) {
			sync_fence_add_pt(fence, &i, pt_a);

			i_a++;
		} else if (pt_a->context > pt_b->context) {
			sync_fence_add_pt(fence, &i, pt_b);

			i_b++;
		} else {
			if (pt_a->seqno - pt_b->seqno <= INT_MAX)
				sync_fence_add_pt(fence, &i, pt_a);
			else
				sync_fence_add_pt(fence, &i, pt_b);

			i_a++;
			i_b++;
		}
	}

	for (; i_a < a->num_fences; i_a++)
		sync_fence_add_pt(fence, &i, a->cbs[i_a].sync_pt);

	for (; i_b < b->num_fences; i_b++)
		sync_fence_add_pt(fence, &i, b->cbs[i_b].sync_pt);

	if (num_fences > i)
		atomic_sub(num_fences - i, &fence->status);
	fence->num_fences = i;

	sync_fence_debug_add(fence);
	return fence;
}
EXPORT_SYMBOL(sync_fence_merge);
int sync_fence_wake_up_wq(wait_queue_t *curr, unsigned mode,
			  int wake_flags, void *key)
{
	struct sync_fence_waiter *wait;

	wait = container_of(curr, struct sync_fence_waiter, work);
	list_del_init(&wait->work.task_list);

	wait->callback(wait->work.private, wait);
	return 1;
}
int sync_fence_wait_async(struct sync_fence *fence,
			  struct sync_fence_waiter *waiter)
{
	int err = atomic_read(&fence->status);
	unsigned long flags;

	if (err < 0)
		return err;

	if (!err)
		return 1;

	init_waitqueue_func_entry(&waiter->work, sync_fence_wake_up_wq);
	waiter->work.private = fence;

	spin_lock_irqsave(&fence->wq.lock, flags);
	err = atomic_read(&fence->status);
	if (err > 0)
		__add_wait_queue_tail(&fence->wq, &waiter->work);
	spin_unlock_irqrestore(&fence->wq.lock, flags);

	if (err < 0)
		return err;

	return !err;
}
EXPORT_SYMBOL(sync_fence_wait_async);
int sync_fence_cancel_async(struct sync_fence *fence,
			    struct sync_fence_waiter *waiter)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&fence->wq.lock, flags);
	if (!list_empty(&waiter->work.task_list))
		list_del_init(&waiter->work.task_list);
	else
		ret = -ENOENT;
	spin_unlock_irqrestore(&fence->wq.lock, flags);
	return ret;
}
EXPORT_SYMBOL(sync_fence_cancel_async);
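/*
 * sync_fence_wait() blocks for up to @timeout milliseconds; a negative
 * timeout means wait forever.  It returns 0 on success, -ETIME on
 * timeout, or the error status carried by the fence.
 */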
int sync_fence_wait(struct sync_fence *fence, long timeout)
{
	long ret;
	int i;

	if (timeout < 0)
		timeout = MAX_SCHEDULE_TIMEOUT;
	else
		timeout = msecs_to_jiffies(timeout);

	trace_sync_wait(fence, 1);
	for (i = 0; i < fence->num_fences; ++i)
		trace_sync_pt(fence->cbs[i].sync_pt);
	ret = wait_event_interruptible_timeout(fence->wq,
					       atomic_read(&fence->status) <= 0,
					       timeout);
	trace_sync_wait(fence, 0);

	if (ret < 0) {
		return ret;
	} else if (ret == 0) {
		if (timeout) {
			pr_info("fence timeout on [%p] after %dms\n", fence,
				jiffies_to_msecs(timeout));
			sync_dump();
		}
		return -ETIME;
	}

	ret = atomic_read(&fence->status);
	if (ret) {
		pr_info("fence error %ld on [%p]\n", ret, fence);
		sync_dump();
	}
	return ret;
}
EXPORT_SYMBOL(sync_fence_wait);
static const char *android_fence_get_driver_name(struct fence *fence)
{
	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
	struct sync_timeline *parent = sync_pt_parent(pt);

	return parent->ops->driver_name;
}
static const char *android_fence_get_timeline_name(struct fence *fence)
{
	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
	struct sync_timeline *parent = sync_pt_parent(pt);

	return parent->name;
}
static void android_fence_release(struct fence *fence)
{
	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
	struct sync_timeline *parent = sync_pt_parent(pt);
	unsigned long flags;

	spin_lock_irqsave(fence->lock, flags);
	list_del(&pt->child_list);
	if (WARN_ON_ONCE(!list_empty(&pt->active_list)))
		list_del(&pt->active_list);
	spin_unlock_irqrestore(fence->lock, flags);

	if (parent->ops->free_pt)
		parent->ops->free_pt(pt);

	sync_timeline_put(parent);
	fence_free(&pt->base);
}
static bool android_fence_signaled(struct fence *fence)
{
	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
	struct sync_timeline *parent = sync_pt_parent(pt);
	int ret;

	ret = parent->ops->has_signaled(pt);
	if (ret < 0)
		fence->status = ret;
	return ret;
}
static bool android_fence_enable_signaling(struct fence *fence)
{
	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
	struct sync_timeline *parent = sync_pt_parent(pt);

	if (android_fence_signaled(fence))
		return false;

	list_add_tail(&pt->active_list, &parent->active_list_head);
	return true;
}
static int android_fence_fill_driver_data(struct fence *fence,
					  void *data, int size)
{
	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
	struct sync_timeline *parent = sync_pt_parent(pt);

	if (!parent->ops->fill_driver_data)
		return 0;
	return parent->ops->fill_driver_data(pt, data, size);
}
static void android_fence_value_str(struct fence *fence,
				    char *str, int size)
{
	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
	struct sync_timeline *parent = sync_pt_parent(pt);

	if (!parent->ops->pt_value_str) {
		if (size)
			*str = 0;
		return;
	}
	parent->ops->pt_value_str(pt, str, size);
}
static void android_fence_timeline_value_str(struct fence *fence,
					     char *str, int size)
{
	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
	struct sync_timeline *parent = sync_pt_parent(pt);

	if (!parent->ops->timeline_value_str) {
		if (size)
			*str = 0;
		return;
	}
	parent->ops->timeline_value_str(parent, str, size);
}
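/*
 * android_fence_ops adapts the legacy sync_timeline_ops interface to
 * the generic struct fence API: each callback resolves the sync_pt and
 * its parent timeline, then defers to the driver's ops.
 */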
static const struct fence_ops android_fence_ops = {
	.get_driver_name = android_fence_get_driver_name,
	.get_timeline_name = android_fence_get_timeline_name,
	.enable_signaling = android_fence_enable_signaling,
	.signaled = android_fence_signaled,
	.wait = fence_default_wait,
	.release = android_fence_release,
	.fill_driver_data = android_fence_fill_driver_data,
	.fence_value_str = android_fence_value_str,
	.timeline_value_str = android_fence_timeline_value_str,
};
static void sync_fence_free(struct kref *kref)
{
	struct sync_fence *fence = container_of(kref, struct sync_fence, kref);
	int i, status = atomic_read(&fence->status);

	for (i = 0; i < fence->num_fences; ++i) {
		if (status)
			fence_remove_callback(fence->cbs[i].sync_pt,
					      &fence->cbs[i].cb);
		fence_put(fence->cbs[i].sync_pt);
	}

	kfree(fence);
}
static int sync_fence_release(struct inode *inode, struct file *file)
{
	struct sync_fence *fence = file->private_data;

	sync_fence_debug_remove(fence);

	kref_put(&fence->kref, sync_fence_free);
	return 0;
}
static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
{
	struct sync_fence *fence = file->private_data;
	int status;

	poll_wait(file, &fence->wq, wait);

	status = atomic_read(&fence->status);

	if (!status)
		return POLLIN;
	else if (status < 0)
		return POLLERR;
	return 0;
}
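/*
 * The ioctl interface on a sync_fence fd: SYNC_IOC_WAIT blocks with a
 * millisecond timeout, SYNC_IOC_MERGE combines two fences into a new
 * fd, and SYNC_IOC_FENCE_INFO reports per-point status to userspace.
 */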
static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
{
	__s32 value;

	if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
		return -EFAULT;

	return sync_fence_wait(fence, value);
}
static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
{
	int fd = get_unused_fd_flags(O_CLOEXEC);
	int err;
	struct sync_fence *fence2, *fence3;
	struct sync_merge_data data;

	if (fd < 0)
		return fd;

	if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fd;
	}

	fence2 = sync_fence_fdget(data.fd2);
	if (fence2 == NULL) {
		err = -ENOENT;
		goto err_put_fd;
	}

	data.name[sizeof(data.name) - 1] = '\0';
	fence3 = sync_fence_merge(data.name, fence, fence2);
	if (fence3 == NULL) {
		err = -ENOMEM;
		goto err_put_fence2;
	}

	data.fence = fd;
	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fence3;
	}

	sync_fence_install(fence3, fd);
	sync_fence_put(fence2);
	return 0;

err_put_fence3:
	sync_fence_put(fence3);

err_put_fence2:
	sync_fence_put(fence2);

err_put_fd:
	put_unused_fd(fd);
	return err;
}
static int sync_fill_pt_info(struct fence *fence, void *data, int size)
{
	struct sync_pt_info *info = data;
	int ret;

	if (size < sizeof(struct sync_pt_info))
		return -ENOMEM;

	info->len = sizeof(struct sync_pt_info);

	if (fence->ops->fill_driver_data) {
		ret = fence->ops->fill_driver_data(fence, info->driver_data,
						   size - sizeof(*info));
		if (ret < 0)
			return ret;

		info->len += ret;
	}

	strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
		sizeof(info->obj_name));
	strlcpy(info->driver_name, fence->ops->get_driver_name(fence),
		sizeof(info->driver_name));
	if (fence_is_signaled(fence))
		info->status = fence->status >= 0 ? 1 : fence->status;
	else
		info->status = 0;
	info->timestamp_ns = ktime_to_ns(fence->timestamp);

	return info->len;
}
static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
					unsigned long arg)
{
	struct sync_fence_info_data *data;
	__u32 size;
	__u32 len = 0;
	int ret, i;

	if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
		return -EFAULT;

	if (size < sizeof(struct sync_fence_info_data))
		return -EINVAL;

	if (size > 4096)
		size = 4096;

	data = kzalloc(size, GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	strlcpy(data->name, fence->name, sizeof(data->name));
	data->status = atomic_read(&fence->status);
	if (data->status >= 0)
		data->status = !data->status;

	len = sizeof(struct sync_fence_info_data);

	for (i = 0; i < fence->num_fences; ++i) {
		struct fence *pt = fence->cbs[i].sync_pt;

		ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);

		if (ret < 0)
			goto out;

		len += ret;
	}

	data->len = len;

	if (copy_to_user((void __user *)arg, data, len))
		ret = -EFAULT;
	else
		ret = 0;

out:
	kfree(data);

	return ret;
}
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct sync_fence *fence = file->private_data;

	switch (cmd) {
	case SYNC_IOC_WAIT:
		return sync_fence_ioctl_wait(fence, arg);

	case SYNC_IOC_MERGE:
		return sync_fence_ioctl_merge(fence, arg);

	case SYNC_IOC_FENCE_INFO:
		return sync_fence_ioctl_fence_info(fence, arg);

	default:
		return -ENOTTY;
	}
}
static const struct file_operations sync_fence_fops = {
	.release = sync_fence_release,
	.poll = sync_fence_poll,
	.unlocked_ioctl = sync_fence_ioctl,
	.compat_ioctl = sync_fence_ioctl,
};