/*
 * drivers/staging/android/sync.c
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/anon_inodes.h>

#include "sync.h"

#define CREATE_TRACE_POINTS
#include "trace/sync.h"

static void sync_fence_signal_pt(struct sync_pt *pt);
static int _sync_pt_has_signaled(struct sync_pt *pt);
static void sync_fence_free(struct kref *kref);
static void sync_dump(void);

static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);

static LIST_HEAD(sync_fence_list_head);
static DEFINE_SPINLOCK(sync_fence_list_lock);

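/**
 * sync_timeline_create() - creates a new sync_timeline
 * @ops:	specifies the implementation ops for the object
 * @size:	size to allocate for this object
 * @name:	sync_timeline name
 *
 * Creates a new sync_timeline which will use the implementation specified by
 * @ops.  @size bytes will be allocated, allowing implementation-specific data
 * to be kept after the generic sync_timeline struct.
 */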
struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
					   int size, const char *name)
{
	struct sync_timeline *obj;
	unsigned long flags;

	if (size < sizeof(struct sync_timeline))
		return NULL;

	obj = kzalloc(size, GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	kref_init(&obj->kref);
	obj->ops = ops;
	strlcpy(obj->name, name, sizeof(obj->name));

	INIT_LIST_HEAD(&obj->child_list_head);
	spin_lock_init(&obj->child_list_lock);

	INIT_LIST_HEAD(&obj->active_list_head);
	spin_lock_init(&obj->active_list_lock);

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	return obj;
}
EXPORT_SYMBOL(sync_timeline_create);

static void sync_timeline_free(struct kref *kref)
{
	struct sync_timeline *obj =
		container_of(kref, struct sync_timeline, kref);
	unsigned long flags;

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_del(&obj->sync_timeline_list);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	if (obj->ops->release_obj)
		obj->ops->release_obj(obj);

	kfree(obj);
}

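/**
 * sync_timeline_destroy() - destroys a sync_timeline
 * @obj:	sync_timeline to destroy
 *
 * A sync implementation should call this when @obj is going away (e.g. on
 * module unload).  @obj is not actually freed until the last reference to it
 * is dropped.
 */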
void sync_timeline_destroy(struct sync_timeline *obj)
{
	obj->destroyed = true;
	/*
	 * Ensure timeline is marked as destroyed before
	 * changing timeline's fences status.
	 */
	smp_wmb();

	/*
	 * signal any children that their parent is going away.
	 */
	sync_timeline_signal(obj);

	kref_put(&obj->kref, sync_timeline_free);
}
EXPORT_SYMBOL(sync_timeline_destroy);

static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
{
	unsigned long flags;

	pt->parent = obj;

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_add_tail(&pt->child_list, &obj->child_list_head);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_timeline_remove_pt(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;

	spin_lock_irqsave(&obj->active_list_lock, flags);
	if (!list_empty(&pt->active_list))
		list_del_init(&pt->active_list);
	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	spin_lock_irqsave(&obj->child_list_lock, flags);
	if (!list_empty(&pt->child_list))
		list_del_init(&pt->child_list);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

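/**
 * sync_timeline_signal() - signals a sync_timeline
 * @obj:	sync_timeline to signal
 *
 * A sync implementation should call this any time one of its sync_pts has
 * signaled or has an error condition.  Every active pt whose has_signaled()
 * op now reports completion (or error) is moved off the active list and its
 * fence is signaled.
 */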
void sync_timeline_signal(struct sync_timeline *obj)
{
	unsigned long flags;
	LIST_HEAD(signaled_pts);
	struct list_head *pos, *n;

	trace_sync_timeline(obj);

	spin_lock_irqsave(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &obj->active_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, active_list);

		if (_sync_pt_has_signaled(pt)) {
			list_del_init(pos);
			list_add(&pt->signaled_list, &signaled_pts);
			kref_get(&pt->fence->kref);
		}
	}

	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &signaled_pts) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, signaled_list);

		list_del_init(pos);
		sync_fence_signal_pt(pt);
		kref_put(&pt->fence->kref, sync_fence_free);
	}
}
EXPORT_SYMBOL(sync_timeline_signal);

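/**
 * sync_pt_create() - creates a sync pt
 * @parent:	sync_pt's parent sync_timeline
 * @size:	size to allocate for this pt
 *
 * Creates a new sync_pt as a child of @parent.  @size bytes will be
 * allocated, allowing implementation-specific data to be kept after the
 * generic sync_pt struct.
 */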
struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
{
	struct sync_pt *pt;

	if (size < sizeof(struct sync_pt))
		return NULL;

	pt = kzalloc(size, GFP_KERNEL);
	if (pt == NULL)
		return NULL;

	INIT_LIST_HEAD(&pt->active_list);
	kref_get(&parent->kref);
	sync_timeline_add_pt(parent, pt);

	return pt;
}
EXPORT_SYMBOL(sync_pt_create);

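/**
 * sync_pt_free() - frees a sync pt
 * @pt:		sync_pt to free
 *
 * This should only be called on sync_pts which have been created but not
 * added to a fence; once a pt is in a fence, the fence owns it.
 */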
void sync_pt_free(struct sync_pt *pt)
{
	if (pt->parent->ops->free_pt)
		pt->parent->ops->free_pt(pt);

	sync_timeline_remove_pt(pt);

	kref_put(&pt->parent->kref, sync_timeline_free);

	kfree(pt);
}
EXPORT_SYMBOL(sync_pt_free);

/* call with pt->parent->active_list_lock held */
static int _sync_pt_has_signaled(struct sync_pt *pt)
{
	int old_status = pt->status;

	if (!pt->status)
		pt->status = pt->parent->ops->has_signaled(pt);

	if (!pt->status && pt->parent->destroyed)
		pt->status = -ENOENT;

	if (pt->status != old_status)
		pt->timestamp = ktime_get();

	return pt->status;
}

static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
{
	return pt->parent->ops->dup(pt);
}

/* Adds a sync pt to the active queue.  Called when added to a fence */
static void sync_pt_activate(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&obj->active_list_lock, flags);

	err = _sync_pt_has_signaled(pt);
	if (err != 0)
		goto out;

	list_add_tail(&pt->active_list, &obj->active_list_head);

out:
	spin_unlock_irqrestore(&obj->active_list_lock, flags);
}

static int sync_fence_release(struct inode *inode, struct file *file);
static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg);

static const struct file_operations sync_fence_fops = {
	.release = sync_fence_release,
	.poll = sync_fence_poll,
	.unlocked_ioctl = sync_fence_ioctl,
	.compat_ioctl = sync_fence_ioctl,
};

static struct sync_fence *sync_fence_alloc(const char *name)
{
	struct sync_fence *fence;
	unsigned long flags;

	fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
	if (fence == NULL)
		return NULL;

	fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
					 fence, 0);
	if (IS_ERR(fence->file))
		goto err;

	kref_init(&fence->kref);
	strlcpy(fence->name, name, sizeof(fence->name));

	INIT_LIST_HEAD(&fence->pt_list_head);
	INIT_LIST_HEAD(&fence->waiter_list_head);
	spin_lock_init(&fence->waiter_list_lock);

	init_waitqueue_head(&fence->wq);

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	return fence;

err:
	kfree(fence);
	return NULL;
}

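/**
 * sync_fence_create() - creates a sync fence
 * @name:	name of fence to create
 * @pt:		sync_pt to add to the fence
 *
 * Creates a fence containing @pt.  Once this is called, the fence takes
 * ownership of @pt.
 */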
/* TODO: implement a create which takes more than one sync_pt */
struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
{
	struct sync_fence *fence;

	if (pt->fence)
		return NULL;

	fence = sync_fence_alloc(name);
	if (fence == NULL)
		return NULL;

	pt->fence = fence;
	list_add(&pt->pt_list, &fence->pt_list_head);
	sync_pt_activate(pt);

	/*
	 * signal the fence in case pt had already signaled when
	 * sync_pt_activate(pt) was called
	 */
	sync_fence_signal_pt(pt);

	return fence;
}
EXPORT_SYMBOL(sync_fence_create);

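/*
 * Illustrative usage (a minimal sketch, not code from this file): a driver
 * typically creates one timeline per context, then one pt + fence per unit
 * of work, and hands the fence fd to userspace.  "my_timeline_ops" and the
 * plain struct sizes are hypothetical; a real driver passes its own ops and
 * extended struct sizes, and must handle allocation/fd failures.
 *
 *	struct sync_timeline *tl;
 *	struct sync_pt *pt;
 *	struct sync_fence *fence;
 *	int fd;
 *
 *	tl = sync_timeline_create(&my_timeline_ops,
 *				  sizeof(struct sync_timeline), "my_tl");
 *	pt = sync_pt_create(tl, sizeof(struct sync_pt));
 *	fence = sync_fence_create("my_work", pt);
 *	fd = get_unused_fd_flags(O_CLOEXEC);
 *	sync_fence_install(fence, fd);
 *
 *	... later, when the hardware completes the work ...
 *	sync_timeline_signal(tl);
 */
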
static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *pos;

	list_for_each(pos, &src->pt_list_head) {
		struct sync_pt *orig_pt =
			container_of(pos, struct sync_pt, pt_list);
		struct sync_pt *new_pt = sync_pt_dup(orig_pt);

		if (new_pt == NULL)
			return -ENOMEM;

		new_pt->fence = dst;
		list_add(&new_pt->pt_list, &dst->pt_list_head);
	}

	return 0;
}

static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *src_pos, *dst_pos, *n;

	list_for_each(src_pos, &src->pt_list_head) {
		struct sync_pt *src_pt =
			container_of(src_pos, struct sync_pt, pt_list);
		bool collapsed = false;

		list_for_each_safe(dst_pos, n, &dst->pt_list_head) {
			struct sync_pt *dst_pt =
				container_of(dst_pos, struct sync_pt, pt_list);
			/* collapse two sync_pts on the same timeline
			 * to a single sync_pt that will signal at
			 * the later of the two
			 */
			if (dst_pt->parent == src_pt->parent) {
				if (dst_pt->parent->ops->compare(dst_pt, src_pt)
						== -1) {
					struct sync_pt *new_pt =
						sync_pt_dup(src_pt);
					if (new_pt == NULL)
						return -ENOMEM;

					new_pt->fence = dst;
					list_replace(&dst_pt->pt_list,
						     &new_pt->pt_list);
					sync_pt_free(dst_pt);
				}
				collapsed = true;
				break;
			}
		}

		if (!collapsed) {
			struct sync_pt *new_pt = sync_pt_dup(src_pt);

			if (new_pt == NULL)
				return -ENOMEM;

			new_pt->fence = dst;
			list_add(&new_pt->pt_list, &dst->pt_list_head);
		}
	}

	return 0;
}

static void sync_fence_detach_pts(struct sync_fence *fence)
{
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);

		sync_timeline_remove_pt(pt);
	}
}

static void sync_fence_free_pts(struct sync_fence *fence)
{
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);

		sync_pt_free(pt);
	}
}

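/**
 * sync_fence_fdget() - gets a fence from an fd
 * @fd:		fd referencing a fence
 *
 * Ensures @fd references a valid fence, increments the refcount of the
 * backing file, and returns the fence.  Returns NULL if @fd does not refer
 * to a sync fence.
 */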
struct sync_fence *sync_fence_fdget(int fd)
{
	struct file *file = fget(fd);

	if (file == NULL)
		return NULL;

	if (file->f_op != &sync_fence_fops)
		goto err;

	return file->private_data;

err:
	fput(file);
	return NULL;
}
EXPORT_SYMBOL(sync_fence_fdget);

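/**
 * sync_fence_put() - puts a reference of a sync fence
 * @fence:	fence to put
 *
 * Puts a reference on @fence.  If this is the last reference, the fence and
 * all its sync_pts will be freed.
 */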
void sync_fence_put(struct sync_fence *fence)
{
	fput(fence->file);
}
EXPORT_SYMBOL(sync_fence_put);

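/**
 * sync_fence_install() - installs a fence into a file descriptor
 * @fence:	fence to install
 * @fd:		file descriptor in which to install the fence
 *
 * Installs @fence into @fd.  @fd should come from get_unused_fd_flags().
 */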
void sync_fence_install(struct sync_fence *fence, int fd)
{
	fd_install(fd, fence->file);
}
EXPORT_SYMBOL(sync_fence_install);

static int sync_fence_get_status(struct sync_fence *fence)
{
	struct list_head *pos;
	int status = 1;

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
		int pt_status = pt->status;

		if (pt_status < 0) {
			status = pt_status;
			break;
		} else if (status == 1) {
			status = pt_status;
		}
	}

	return status;
}

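/**
 * sync_fence_merge() - merges two sync fences
 * @name:	name of new fence
 * @a:		fence a
 * @b:		fence b
 *
 * Creates a new fence which contains copies of all the sync_pts in both
 * @a and @b.  @a and @b remain valid, independent fences.
 */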
struct sync_fence *sync_fence_merge(const char *name,
				    struct sync_fence *a, struct sync_fence *b)
{
	struct sync_fence *fence;
	struct list_head *pos;
	int err;

	fence = sync_fence_alloc(name);
	if (fence == NULL)
		return NULL;

	err = sync_fence_copy_pts(fence, a);
	if (err < 0)
		goto err;

	err = sync_fence_merge_pts(fence, b);
	if (err < 0)
		goto err;

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);
		sync_pt_activate(pt);
	}

	/*
	 * signal the fence in case one of its pts had already signaled
	 * when it was activated
	 */
	sync_fence_signal_pt(list_first_entry(&fence->pt_list_head,
					      struct sync_pt,
					      pt_list));

	return fence;
err:
	sync_fence_free_pts(fence);
	kfree(fence);
	return NULL;
}
EXPORT_SYMBOL(sync_fence_merge);

static void sync_fence_signal_pt(struct sync_pt *pt)
{
	LIST_HEAD(signaled_waiters);
	struct sync_fence *fence = pt->fence;
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int status;

	status = sync_fence_get_status(fence);

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * this should protect against two threads racing on the signaled
	 * false -> true transition
	 */
	if (status && !fence->status) {
		list_for_each_safe(pos, n, &fence->waiter_list_head)
			list_move(pos, &signaled_waiters);

		fence->status = status;
	} else {
		status = 0;
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	if (status) {
		list_for_each_safe(pos, n, &signaled_waiters) {
			struct sync_fence_waiter *waiter =
				container_of(pos, struct sync_fence_waiter,
					     waiter_list);

			list_del(pos);
			waiter->callback(fence, waiter);
		}
		wake_up(&fence->wq);
	}
}

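/**
 * sync_fence_wait_async() - registers an async wait on the fence
 * @fence:	fence to wait on
 * @waiter:	waiter callback struct
 *
 * Registers a callback to be called when @fence signals or has an error.
 * Returns @fence's status (nonzero) if it has already signaled or errored,
 * 0 otherwise.
 */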
int sync_fence_wait_async(struct sync_fence *fence,
			  struct sync_fence_waiter *waiter)
{
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&fence->waiter_list_lock, flags);

	if (fence->status) {
		err = fence->status;
		goto out;
	}

	list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
out:
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	return err;
}
EXPORT_SYMBOL(sync_fence_wait_async);

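/**
 * sync_fence_cancel_async() - cancels an async wait
 * @fence:	fence being waited on
 * @waiter:	waiter callback struct
 *
 * Returns 0 if @waiter was removed from @fence's async waiter list.
 * Returns -ENOENT if @waiter was not found on the list (e.g. its callback
 * has already fired).
 */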
int sync_fence_cancel_async(struct sync_fence *fence,
			    struct sync_fence_waiter *waiter)
{
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * Make sure waiter is still in waiter_list because it is possible for
	 * the waiter to be removed from the list while the callback is still
	 * pending.
	 */
	list_for_each_safe(pos, n, &fence->waiter_list_head) {
		struct sync_fence_waiter *list_waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);
		if (list_waiter == waiter) {
			list_del(pos);
			ret = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
	return ret;
}
EXPORT_SYMBOL(sync_fence_cancel_async);

static bool sync_fence_check(struct sync_fence *fence)
{
	/*
	 * Make sure that reads to fence->status are ordered with the
	 * wait queue event triggering
	 */
	smp_rmb();
	return fence->status != 0;
}

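/**
 * sync_fence_wait() - waits on a fence
 * @fence:	fence to wait on
 * @timeout:	timeout in ms
 *
 * Waits for @fence to be signaled or to have an error.  Waits indefinitely
 * if @timeout < 0; if @timeout == 0 and the fence has not signaled, returns
 * -ETIME immediately.
 */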
int sync_fence_wait(struct sync_fence *fence, long timeout)
{
	int err = 0;
	struct sync_pt *pt;

	trace_sync_wait(fence, 1);
	list_for_each_entry(pt, &fence->pt_list_head, pt_list)
		trace_sync_pt(pt);

	if (timeout > 0) {
		timeout = msecs_to_jiffies(timeout);
		err = wait_event_interruptible_timeout(fence->wq,
						       sync_fence_check(fence),
						       timeout);
	} else if (timeout < 0) {
		err = wait_event_interruptible(fence->wq,
					       sync_fence_check(fence));
	}

	trace_sync_wait(fence, 0);

	if (err < 0)
		return err;

	if (fence->status < 0) {
		pr_info("fence error %d on [%p]\n", fence->status, fence);
		sync_dump();
		return fence->status;
	}

	if (fence->status == 0) {
		if (timeout > 0) {
			pr_info("fence timeout on [%p] after %dms\n", fence,
				jiffies_to_msecs(timeout));
			sync_dump();
		}
		return -ETIME;
	}

	return 0;
}
EXPORT_SYMBOL(sync_fence_wait);

static void sync_fence_free(struct kref *kref)
{
	struct sync_fence *fence = container_of(kref, struct sync_fence, kref);

	sync_fence_free_pts(fence);

	kfree(fence);
}

static int sync_fence_release(struct inode *inode, struct file *file)
{
	struct sync_fence *fence = file->private_data;
	unsigned long flags;

	/*
	 * We need to remove all ways to access this fence before dropping
	 * our ref.
	 *
	 * start with its membership in the global fence list
	 */
	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_del(&fence->sync_fence_list);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	/*
	 * remove its pts from their parents so that sync_timeline_signal()
	 * can't reference the fence.
	 */
	sync_fence_detach_pts(fence);

	kref_put(&fence->kref, sync_fence_free);

	return 0;
}

static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
{
	struct sync_fence *fence = file->private_data;

	poll_wait(file, &fence->wq, wait);

	/*
	 * Make sure that reads to fence->status are ordered with the
	 * wait queue event triggering
	 */
	smp_rmb();

	if (fence->status == 1)
		return POLLIN;
	else if (fence->status < 0)
		return POLLERR;
	else
		return 0;
}

static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
{
	__s32 value;

	if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
		return -EFAULT;

	return sync_fence_wait(fence, value);
}

static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
{
	int fd = get_unused_fd_flags(O_CLOEXEC);
	int err;
	struct sync_fence *fence2, *fence3;
	struct sync_merge_data data;

	if (fd < 0)
		return fd;

	if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fd;
	}

	fence2 = sync_fence_fdget(data.fd2);
	if (fence2 == NULL) {
		err = -ENOENT;
		goto err_put_fd;
	}

	data.name[sizeof(data.name) - 1] = '\0';
	fence3 = sync_fence_merge(data.name, fence, fence2);
	if (fence3 == NULL) {
		err = -ENOMEM;
		goto err_put_fence2;
	}

	data.fence = fd;
	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fence3;
	}

	sync_fence_install(fence3, fd);
	sync_fence_put(fence2);
	return 0;

err_put_fence3:
	sync_fence_put(fence3);

err_put_fence2:
	sync_fence_put(fence2);

err_put_fd:
	put_unused_fd(fd);
	return err;
}

static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
{
	struct sync_pt_info *info = data;
	int ret;

	if (size < sizeof(struct sync_pt_info))
		return -ENOMEM;

	info->len = sizeof(struct sync_pt_info);

	if (pt->parent->ops->fill_driver_data) {
		ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
							size - sizeof(*info));
		if (ret < 0)
			return ret;

		info->len += ret;
	}

	strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
	strlcpy(info->driver_name, pt->parent->ops->driver_name,
		sizeof(info->driver_name));
	info->status = pt->status;
	info->timestamp_ns = ktime_to_ns(pt->timestamp);

	return info->len;
}

static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
					unsigned long arg)
{
	struct sync_fence_info_data *data;
	struct list_head *pos;
	__u32 size;
	__u32 len = 0;
	int ret;

	if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
		return -EFAULT;

	if (size < sizeof(struct sync_fence_info_data))
		return -EINVAL;

	if (size > 4096)
		size = 4096;

	data = kzalloc(size, GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	strlcpy(data->name, fence->name, sizeof(data->name));
	data->status = fence->status;
	len = sizeof(struct sync_fence_info_data);

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);

		ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);

		if (ret < 0)
			goto out;

		len += ret;
	}

	data->len = len;

	if (copy_to_user((void __user *)arg, data, len))
		ret = -EFAULT;
	else
		ret = 0;

out:
	kfree(data);

	return ret;
}

static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct sync_fence *fence = file->private_data;

	switch (cmd) {
	case SYNC_IOC_WAIT:
		return sync_fence_ioctl_wait(fence, arg);

	case SYNC_IOC_MERGE:
		return sync_fence_ioctl_merge(fence, arg);

	case SYNC_IOC_FENCE_INFO:
		return sync_fence_ioctl_fence_info(fence, arg);

	default:
		return -ENOTTY;
	}
}

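/*
 * Illustrative userspace view of these ioctls (a hedged sketch; SYNC_IOC_*
 * and struct sync_merge_data come from the uapi sync header, and error
 * handling is omitted):
 *
 *	__s32 timeout_ms = 1000;
 *	struct sync_merge_data merge = { .fd2 = other_fd, .name = "merged" };
 *
 *	ioctl(fence_fd, SYNC_IOC_WAIT, &timeout_ms);
 *	ioctl(fence_fd, SYNC_IOC_MERGE, &merge);
 *	 ... merge.fence now holds the fd of the merged fence ...
 */
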
#ifdef CONFIG_DEBUG_FS
static const char *sync_status_str(int status)
{
	if (status > 0)
		return "signaled";
	else if (status == 0)
		return "active";
	else
		return "error";
}

static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
{
	int status = pt->status;

	seq_printf(s, "  %s%spt %s",
		   fence ? pt->parent->name : "",
		   fence ? "_" : "",
		   sync_status_str(status));
	if (pt->status) {
		struct timeval tv = ktime_to_timeval(pt->timestamp);

		seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
	}

	if (pt->parent->ops->timeline_value_str &&
	    pt->parent->ops->pt_value_str) {
		char value[64];

		pt->parent->ops->pt_value_str(pt, value, sizeof(value));
		seq_printf(s, ": %s", value);
		if (fence) {
			pt->parent->ops->timeline_value_str(pt->parent, value,
							    sizeof(value));
			seq_printf(s, " / %s", value);
		}
	} else if (pt->parent->ops->print_pt) {
		seq_puts(s, ": ");
		pt->parent->ops->print_pt(s, pt);
	}

	seq_puts(s, "\n");
}

static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);

	if (obj->ops->timeline_value_str) {
		char value[64];

		obj->ops->timeline_value_str(obj, value, sizeof(value));
		seq_printf(s, ": %s", value);
	} else if (obj->ops->print_obj) {
		seq_puts(s, ": ");
		obj->ops->print_obj(s, obj);
	}

	seq_puts(s, "\n");

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_for_each(pos, &obj->child_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, child_list);
		sync_print_pt(s, pt, false);
	}
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
		   sync_status_str(fence->status));

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);
		sync_print_pt(s, pt, true);
	}

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	list_for_each(pos, &fence->waiter_list_head) {
		struct sync_fence_waiter *waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);

		seq_printf(s, "waiter %pF\n", waiter->callback);
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
}

static int sync_debugfs_show(struct seq_file *s, void *unused)
{
	unsigned long flags;
	struct list_head *pos;

	seq_puts(s, "objs:\n--------------\n");

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_for_each(pos, &sync_timeline_list_head) {
		struct sync_timeline *obj =
			container_of(pos, struct sync_timeline,
				     sync_timeline_list);

		sync_print_obj(s, obj);
		seq_puts(s, "\n");
	}
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	seq_puts(s, "fences:\n--------------\n");

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_for_each(pos, &sync_fence_list_head) {
		struct sync_fence *fence =
			container_of(pos, struct sync_fence, sync_fence_list);

		sync_print_fence(s, fence);
		seq_puts(s, "\n");
	}
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
	return 0;
}

static int sync_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, sync_debugfs_show, inode->i_private);
}

static const struct file_operations sync_debugfs_fops = {
	.open           = sync_debugfs_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};

static __init int sync_debugfs_init(void)
{
	debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
	return 0;
}
late_initcall(sync_debugfs_init);

#define DUMP_CHUNK 256
static char sync_dump_buf[64 * 1024];
static void sync_dump(void)
{
	struct seq_file s = {
		.buf = sync_dump_buf,
		.size = sizeof(sync_dump_buf) - 1,
	};
	int i;

	sync_debugfs_show(&s, NULL);

	for (i = 0; i < s.count; i += DUMP_CHUNK) {
		if ((s.count - i) > DUMP_CHUNK) {
			char c = s.buf[i + DUMP_CHUNK];

			s.buf[i + DUMP_CHUNK] = 0;
			pr_cont("%s", s.buf + i);
			s.buf[i + DUMP_CHUNK] = c;
		} else {
			s.buf[s.count] = 0;
			pr_cont("%s", s.buf + i);
		}
	}
}
#else
static void sync_dump(void)
{
}
#endif