/*
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/anon_inodes.h>

#include "sync.h"

#define CREATE_TRACE_POINTS
#include "trace/sync.h"

static void sync_fence_signal_pt(struct sync_pt *pt);
static int _sync_pt_has_signaled(struct sync_pt *pt);
static void sync_fence_free(struct kref *kref);
static void sync_dump(void);

static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);

static LIST_HEAD(sync_fence_list_head);
static DEFINE_SPINLOCK(sync_fence_list_lock);

struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
                                           int size, const char *name)
{
        struct sync_timeline *obj;
        unsigned long flags;

        if (size < sizeof(struct sync_timeline))
                return NULL;

        obj = kzalloc(size, GFP_KERNEL);
        if (obj == NULL)
                return NULL;

        kref_init(&obj->kref);
        obj->ops = ops;
        strlcpy(obj->name, name, sizeof(obj->name));

        INIT_LIST_HEAD(&obj->child_list_head);
        spin_lock_init(&obj->child_list_lock);

        INIT_LIST_HEAD(&obj->active_list_head);
        spin_lock_init(&obj->active_list_lock);

        spin_lock_irqsave(&sync_timeline_list_lock, flags);
        list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
        spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

        return obj;
}
EXPORT_SYMBOL(sync_timeline_create);

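/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * a driver embeds struct sync_timeline at the start of its own timeline
 * struct and passes the enlarged size here so the core allocates the
 * whole object; my_timeline and my_timeline_ops are made-up names.
 *
 *      struct my_timeline {
 *              struct sync_timeline    obj;
 *              u32                     value;
 *      };
 *
 *      struct my_timeline *tl = (struct my_timeline *)
 *              sync_timeline_create(&my_timeline_ops,
 *                                   sizeof(struct my_timeline), "my_tl");
 */
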
static void sync_timeline_free(struct kref *kref)
{
        struct sync_timeline *obj =
                container_of(kref, struct sync_timeline, kref);
        unsigned long flags;

        spin_lock_irqsave(&sync_timeline_list_lock, flags);
        list_del(&obj->sync_timeline_list);
        spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

        if (obj->ops->release_obj)
                obj->ops->release_obj(obj);

        kfree(obj);
}

void sync_timeline_destroy(struct sync_timeline *obj)
{
        obj->destroyed = true;

        /*
         * signal any children that their parent is going away.
         */
        sync_timeline_signal(obj);

        kref_put(&obj->kref, sync_timeline_free);
}
EXPORT_SYMBOL(sync_timeline_destroy);

static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
{
        unsigned long flags;

        pt->parent = obj;

        spin_lock_irqsave(&obj->child_list_lock, flags);
        list_add_tail(&pt->child_list, &obj->child_list_head);
        spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_timeline_remove_pt(struct sync_pt *pt)
{
        struct sync_timeline *obj = pt->parent;
        unsigned long flags;

        spin_lock_irqsave(&obj->active_list_lock, flags);
        if (!list_empty(&pt->active_list))
                list_del_init(&pt->active_list);
        spin_unlock_irqrestore(&obj->active_list_lock, flags);

        spin_lock_irqsave(&obj->child_list_lock, flags);
        if (!list_empty(&pt->child_list))
                list_del_init(&pt->child_list);
        spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

void sync_timeline_signal(struct sync_timeline *obj)
{
        unsigned long flags;
        LIST_HEAD(signaled_pts);
        struct list_head *pos, *n;

        trace_sync_timeline(obj);

        spin_lock_irqsave(&obj->active_list_lock, flags);

        list_for_each_safe(pos, n, &obj->active_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, active_list);

                if (_sync_pt_has_signaled(pt)) {
                        list_del(pos);
                        list_add(&pt->signaled_list, &signaled_pts);
                        kref_get(&pt->fence->kref);
                }
        }

        spin_unlock_irqrestore(&obj->active_list_lock, flags);

        list_for_each_safe(pos, n, &signaled_pts) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, signaled_list);

                list_del_init(pos);
                sync_fence_signal_pt(pt);
                kref_put(&pt->fence->kref, sync_fence_free);
        }
}
EXPORT_SYMBOL(sync_timeline_signal);

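/*
 * Illustrative sketch (hypothetical driver code): a driver typically
 * advances its timeline counter from its irq handler and then calls
 * sync_timeline_signal() so newly-signaled pts are collected off the
 * active list and their fences are woken.
 *
 *      static irqreturn_t my_irq_handler(int irq, void *data)
 *      {
 *              struct my_timeline *tl = data;
 *
 *              tl->value++;
 *              sync_timeline_signal(&tl->obj);
 *              return IRQ_HANDLED;
 *      }
 */
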
struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
{
        struct sync_pt *pt;

        if (size < sizeof(struct sync_pt))
                return NULL;

        pt = kzalloc(size, GFP_KERNEL);
        if (pt == NULL)
                return NULL;

        INIT_LIST_HEAD(&pt->active_list);
        kref_get(&parent->kref);
        sync_timeline_add_pt(parent, pt);

        return pt;
}
EXPORT_SYMBOL(sync_pt_create);

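/*
 * Illustrative sketch (hypothetical names): as with timelines, a driver
 * embeds struct sync_pt first in its own pt struct and records the
 * timeline value this pt will signal at.
 *
 *      struct my_pt {
 *              struct sync_pt  pt;
 *              u32             value;
 *      };
 *
 *      struct my_pt *p = (struct my_pt *)
 *              sync_pt_create(&tl->obj, sizeof(struct my_pt));
 *      if (p)
 *              p->value = value;
 */
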
void sync_pt_free(struct sync_pt *pt)
{
        if (pt->parent->ops->free_pt)
                pt->parent->ops->free_pt(pt);

        sync_timeline_remove_pt(pt);

        kref_put(&pt->parent->kref, sync_timeline_free);

        kfree(pt);
}
EXPORT_SYMBOL(sync_pt_free);

/* call with pt->parent->active_list_lock held */
static int _sync_pt_has_signaled(struct sync_pt *pt)
{
        int old_status = pt->status;

        if (!pt->status)
                pt->status = pt->parent->ops->has_signaled(pt);

        if (!pt->status && pt->parent->destroyed)
                pt->status = -ENOENT;

        if (pt->status != old_status)
                pt->timestamp = ktime_get();

        return pt->status;
}

static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
{
        return pt->parent->ops->dup(pt);
}

/* Adds a sync pt to the active queue.  Called when added to a fence */
static void sync_pt_activate(struct sync_pt *pt)
{
        struct sync_timeline *obj = pt->parent;
        unsigned long flags;
        int err;

        spin_lock_irqsave(&obj->active_list_lock, flags);

        err = _sync_pt_has_signaled(pt);
        if (err != 0)
                goto out;

        list_add_tail(&pt->active_list, &obj->active_list_head);

out:
        spin_unlock_irqrestore(&obj->active_list_lock, flags);
}

static int sync_fence_release(struct inode *inode, struct file *file);
static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
                             unsigned long arg);

static const struct file_operations sync_fence_fops = {
        .release = sync_fence_release,
        .poll = sync_fence_poll,
        .unlocked_ioctl = sync_fence_ioctl,
        .compat_ioctl = sync_fence_ioctl,
};

static struct sync_fence *sync_fence_alloc(const char *name)
{
        struct sync_fence *fence;
        unsigned long flags;

        fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
        if (fence == NULL)
                return NULL;

        fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
                                         fence, 0);
        if (IS_ERR(fence->file))
                goto err;

        kref_init(&fence->kref);
        strlcpy(fence->name, name, sizeof(fence->name));

        INIT_LIST_HEAD(&fence->pt_list_head);
        INIT_LIST_HEAD(&fence->waiter_list_head);
        spin_lock_init(&fence->waiter_list_lock);

        init_waitqueue_head(&fence->wq);

        spin_lock_irqsave(&sync_fence_list_lock, flags);
        list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
        spin_unlock_irqrestore(&sync_fence_list_lock, flags);

        return fence;

err:
        kfree(fence);
        return NULL;
}

/* TODO: implement a create which takes more than one sync_pt */
struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
{
        struct sync_fence *fence;

        if (pt->fence)
                return NULL;

        fence = sync_fence_alloc(name);
        if (fence == NULL)
                return NULL;

        pt->fence = fence;
        list_add(&pt->pt_list, &fence->pt_list_head);
        sync_pt_activate(pt);

        /*
         * signal the fence in case pt signaled before
         * sync_pt_activate(pt) was called
         */
        sync_fence_signal_pt(pt);

        return fence;
}
EXPORT_SYMBOL(sync_fence_create);

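/*
 * Illustrative sketch (hypothetical call site, error handling elided):
 * the usual pattern for handing a fence to userspace is pt -> fence ->
 * fd, mirroring what sync_fence_ioctl_merge() below does for merged
 * fences.
 *
 *      struct sync_pt *pt = my_pt_create(tl, value);
 *      struct sync_fence *fence = sync_fence_create("my_fence", pt);
 *      int fd = get_unused_fd_flags(O_CLOEXEC);
 *
 *      sync_fence_install(fence, fd);
 *      ...return fd to userspace...
 */
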
static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
{
        struct list_head *pos;

        list_for_each(pos, &src->pt_list_head) {
                struct sync_pt *orig_pt =
                        container_of(pos, struct sync_pt, pt_list);
                struct sync_pt *new_pt = sync_pt_dup(orig_pt);

                if (new_pt == NULL)
                        return -ENOMEM;

                new_pt->fence = dst;
                list_add(&new_pt->pt_list, &dst->pt_list_head);
        }

        return 0;
}

static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src)
{
        struct list_head *src_pos, *dst_pos, *n;

        list_for_each(src_pos, &src->pt_list_head) {
                struct sync_pt *src_pt =
                        container_of(src_pos, struct sync_pt, pt_list);
                bool collapsed = false;

                list_for_each_safe(dst_pos, n, &dst->pt_list_head) {
                        struct sync_pt *dst_pt =
                                container_of(dst_pos, struct sync_pt, pt_list);
                        /* collapse two sync_pts on the same timeline
                         * to a single sync_pt that will signal at
                         * the later of the two
                         */
                        if (dst_pt->parent == src_pt->parent) {
                                if (dst_pt->parent->ops->compare(dst_pt, src_pt)
                                                == -1) {
                                        struct sync_pt *new_pt =
                                                sync_pt_dup(src_pt);

                                        if (new_pt == NULL)
                                                return -ENOMEM;

                                        new_pt->fence = dst;
                                        list_replace(&dst_pt->pt_list,
                                                     &new_pt->pt_list);
                                        sync_pt_free(dst_pt);
                                }
                                collapsed = true;
                                break;
                        }
                }

                if (!collapsed) {
                        struct sync_pt *new_pt = sync_pt_dup(src_pt);

                        if (new_pt == NULL)
                                return -ENOMEM;

                        new_pt->fence = dst;
                        list_add(&new_pt->pt_list, &dst->pt_list_head);
                }
        }

        return 0;
}

static void sync_fence_detach_pts(struct sync_fence *fence)
{
        struct list_head *pos, *n;

        list_for_each_safe(pos, n, &fence->pt_list_head) {
                struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);

                sync_timeline_remove_pt(pt);
        }
}

static void sync_fence_free_pts(struct sync_fence *fence)
{
        struct list_head *pos, *n;

        list_for_each_safe(pos, n, &fence->pt_list_head) {
                struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);

                sync_pt_free(pt);
        }
}

struct sync_fence *sync_fence_fdget(int fd)
{
        struct file *file = fget(fd);

        if (file == NULL)
                return NULL;

        if (file->f_op != &sync_fence_fops)
                goto err;

        return file->private_data;

err:
        fput(file);
        return NULL;
}
EXPORT_SYMBOL(sync_fence_fdget);

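/*
 * Illustrative sketch (hypothetical call site): a driver accepting a
 * fence fd from userspace looks it up, uses the fence, and drops the
 * file reference it took with sync_fence_put().
 *
 *      struct sync_fence *fence = sync_fence_fdget(fd);
 *
 *      if (fence == NULL)
 *              return -EINVAL;
 *      err = sync_fence_wait(fence, timeout_ms);
 *      sync_fence_put(fence);
 */
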
void sync_fence_put(struct sync_fence *fence)
{
        fput(fence->file);
}
EXPORT_SYMBOL(sync_fence_put);

void sync_fence_install(struct sync_fence *fence, int fd)
{
        fd_install(fd, fence->file);
}
EXPORT_SYMBOL(sync_fence_install);

static int sync_fence_get_status(struct sync_fence *fence)
{
        struct list_head *pos;
        int status = 1;

        list_for_each(pos, &fence->pt_list_head) {
                struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
                int pt_status = pt->status;

                if (pt_status < 0) {
                        status = pt_status;
                        break;
                } else if (status == 1) {
                        status = pt_status;
                }
        }

        return status;
}

struct sync_fence *sync_fence_merge(const char *name,
                                    struct sync_fence *a, struct sync_fence *b)
{
        struct sync_fence *fence;
        struct list_head *pos;
        int err;

        fence = sync_fence_alloc(name);
        if (fence == NULL)
                return NULL;

        err = sync_fence_copy_pts(fence, a);
        if (err < 0)
                goto err;

        err = sync_fence_merge_pts(fence, b);
        if (err < 0)
                goto err;

        list_for_each(pos, &fence->pt_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, pt_list);

                sync_pt_activate(pt);
        }

        /*
         * signal the fence in case one of its pts signaled before
         * it was activated
         */
        sync_fence_signal_pt(list_first_entry(&fence->pt_list_head,
                                              struct sync_pt,
                                              pt_list));

        return fence;
err:
        sync_fence_free_pts(fence);
        kfree(fence);
        return NULL;
}
EXPORT_SYMBOL(sync_fence_merge);

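/*
 * Illustrative sketch (hypothetical call site): merging gives a single
 * fence to wait on for several producers; the input fences stay valid
 * and the result is backed by its own file.
 *
 *      struct sync_fence *both = sync_fence_merge("a+b", a, b);
 *
 *      if (both == NULL)
 *              return -ENOMEM;
 *      err = sync_fence_wait(both, -1);
 *      sync_fence_put(both);
 */
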
static void sync_fence_signal_pt(struct sync_pt *pt)
{
        LIST_HEAD(signaled_waiters);
        struct sync_fence *fence = pt->fence;
        struct list_head *pos;
        struct list_head *n;
        unsigned long flags;
        int status;

        status = sync_fence_get_status(fence);

        spin_lock_irqsave(&fence->waiter_list_lock, flags);
        /*
         * this should protect against two threads racing on the signaled
         * false -> true transition
         */
        if (status && !fence->status) {
                list_for_each_safe(pos, n, &fence->waiter_list_head)
                        list_move(pos, &signaled_waiters);

                fence->status = status;
        } else
                status = 0;

        spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

        if (status) {
                list_for_each_safe(pos, n, &signaled_waiters) {
                        struct sync_fence_waiter *waiter =
                                container_of(pos, struct sync_fence_waiter,
                                             waiter_list);

                        list_del(pos);
                        waiter->callback(fence, waiter);
                }
                wake_up(&fence->wq);
        }
}

int sync_fence_wait_async(struct sync_fence *fence,
                          struct sync_fence_waiter *waiter)
{
        unsigned long flags;
        int err = 0;

        spin_lock_irqsave(&fence->waiter_list_lock, flags);

        if (fence->status) {
                err = fence->status;
                goto out;
        }

        list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
out:
        spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

        return err;
}
EXPORT_SYMBOL(sync_fence_wait_async);

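/*
 * Illustrative sketch, assuming the sync_fence_waiter_init() helper
 * declared in sync.h and a hypothetical struct my_work wrapping the
 * waiter: the callback runs from the signal path, so heavy work should
 * be deferred.
 *
 *      static void my_fence_cb(struct sync_fence *fence,
 *                              struct sync_fence_waiter *waiter)
 *      {
 *              struct my_work *w = container_of(waiter, struct my_work,
 *                                               waiter);
 *
 *              schedule_work(&w->work);
 *      }
 *
 *      sync_fence_waiter_init(&w->waiter, my_fence_cb);
 *      err = sync_fence_wait_async(fence, &w->waiter);
 *      ...err > 0: fence already signaled, run the work inline;
 *         err < 0: fence already in an error state...
 */
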
int sync_fence_cancel_async(struct sync_fence *fence,
                            struct sync_fence_waiter *waiter)
{
        struct list_head *pos;
        struct list_head *n;
        unsigned long flags;
        int ret = -ENOENT;

        spin_lock_irqsave(&fence->waiter_list_lock, flags);
        /*
         * Make sure waiter is still in waiter_list because it is possible for
         * the waiter to be removed from the list while the callback is still
         * pending.
         */
        list_for_each_safe(pos, n, &fence->waiter_list_head) {
                struct sync_fence_waiter *list_waiter =
                        container_of(pos, struct sync_fence_waiter,
                                     waiter_list);

                if (list_waiter == waiter) {
                        list_del(pos);
                        ret = 0;
                        break;
                }
        }
        spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
        return ret;
}
EXPORT_SYMBOL(sync_fence_cancel_async);

static bool sync_fence_check(struct sync_fence *fence)
{
        /*
         * Make sure that reads to fence->status are ordered with the
         * wait queue event triggering
         */
        smp_rmb();
        return fence->status != 0;
}

int sync_fence_wait(struct sync_fence *fence, long timeout)
{
        int err = 0;
        struct sync_pt *pt;

        trace_sync_wait(fence, 1);
        list_for_each_entry(pt, &fence->pt_list_head, pt_list)
                trace_sync_pt(pt);

        if (timeout > 0) {
                timeout = msecs_to_jiffies(timeout);
                err = wait_event_interruptible_timeout(fence->wq,
                                                       sync_fence_check(fence),
                                                       timeout);
        } else if (timeout < 0) {
                err = wait_event_interruptible(fence->wq,
                                               sync_fence_check(fence));
        }
        trace_sync_wait(fence, 0);

        if (err < 0)
                return err;

        if (fence->status < 0) {
                pr_info("fence error %d on [%p]\n", fence->status, fence);
                sync_dump();
                return fence->status;
        }

        if (fence->status == 0) {
                pr_info("fence timeout on [%p] after %dms\n", fence,
                        jiffies_to_msecs(timeout));
                sync_dump();
                return -ETIME;
        }

        return 0;
}
EXPORT_SYMBOL(sync_fence_wait);

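/*
 * Illustrative sketch (hypothetical call site): timeout is in
 * milliseconds; a negative timeout blocks indefinitely and a zero
 * timeout only polls the current status.
 *
 *      err = sync_fence_wait(fence, 3000);
 *      if (err == -ETIME)
 *              ...producer missed the 3s deadline...
 *      else if (err < 0)
 *              ...interrupted, or the fence signaled an error...
 */
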
static void sync_fence_free(struct kref *kref)
{
        struct sync_fence *fence = container_of(kref, struct sync_fence, kref);

        sync_fence_free_pts(fence);

        kfree(fence);
}

static int sync_fence_release(struct inode *inode, struct file *file)
{
        struct sync_fence *fence = file->private_data;
        unsigned long flags;

        /*
         * We need to remove all ways to access this fence before dropping
         * our ref.
         *
         * start with its membership in the global fence list
         */
        spin_lock_irqsave(&sync_fence_list_lock, flags);
        list_del(&fence->sync_fence_list);
        spin_unlock_irqrestore(&sync_fence_list_lock, flags);

        /*
         * remove its pts from their parents so that sync_timeline_signal()
         * can't reference the fence.
         */
        sync_fence_detach_pts(fence);

        kref_put(&fence->kref, sync_fence_free);

        return 0;
}

static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
{
        struct sync_fence *fence = file->private_data;

        poll_wait(file, &fence->wq, wait);

        /*
         * Make sure that reads to fence->status are ordered with the
         * wait queue event triggering
         */
        smp_rmb();

        if (fence->status == 1)
                return POLLIN;
        else if (fence->status < 0)
                return POLLERR;
        else
                return 0;
}

static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
{
        __s32 value;

        if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
                return -EFAULT;

        return sync_fence_wait(fence, value);
}

static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
{
        int fd = get_unused_fd_flags(O_CLOEXEC);
        int err;
        struct sync_fence *fence2, *fence3;
        struct sync_merge_data data;

        if (fd < 0)
                return fd;

        if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
                err = -EFAULT;
                goto err_put_fd;
        }

        fence2 = sync_fence_fdget(data.fd2);
        if (fence2 == NULL) {
                err = -ENOENT;
                goto err_put_fd;
        }

        data.name[sizeof(data.name) - 1] = '\0';
        fence3 = sync_fence_merge(data.name, fence, fence2);
        if (fence3 == NULL) {
                err = -ENOMEM;
                goto err_put_fence2;
        }

        data.fence = fd;
        if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
                err = -EFAULT;
                goto err_put_fence3;
        }

        sync_fence_install(fence3, fd);
        sync_fence_put(fence2);
        return 0;

err_put_fence3:
        sync_fence_put(fence3);

err_put_fence2:
        sync_fence_put(fence2);

err_put_fd:
        put_unused_fd(fd);
        return err;
}

static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
{
        struct sync_pt_info *info = data;
        int ret;

        if (size < sizeof(struct sync_pt_info))
                return -ENOMEM;

        info->len = sizeof(struct sync_pt_info);

        if (pt->parent->ops->fill_driver_data) {
                ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
                                                        size - sizeof(*info));
                if (ret < 0)
                        return ret;

                info->len += ret;
        }

        strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
        strlcpy(info->driver_name, pt->parent->ops->driver_name,
                sizeof(info->driver_name));
        info->status = pt->status;
        info->timestamp_ns = ktime_to_ns(pt->timestamp);

        return info->len;
}

static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
                                        unsigned long arg)
{
        struct sync_fence_info_data *data;
        struct list_head *pos;
        __u32 size;
        __u32 len = 0;
        int ret;

        if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
                return -EFAULT;

        if (size < sizeof(struct sync_fence_info_data))
                return -EINVAL;

        if (size > 4096)
                size = 4096;

        data = kzalloc(size, GFP_KERNEL);
        if (data == NULL)
                return -ENOMEM;

        strlcpy(data->name, fence->name, sizeof(data->name));
        data->status = fence->status;
        len = sizeof(struct sync_fence_info_data);

        list_for_each(pos, &fence->pt_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, pt_list);

                ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);

                if (ret < 0)
                        goto out;

                len += ret;
        }

        data->len = len;

        if (copy_to_user((void __user *)arg, data, len))
                ret = -EFAULT;
        else
                ret = 0;

out:
        kfree(data);

        return ret;
}

static long sync_fence_ioctl(struct file *file, unsigned int cmd,
                             unsigned long arg)
{
        struct sync_fence *fence = file->private_data;

        switch (cmd) {
        case SYNC_IOC_WAIT:
                return sync_fence_ioctl_wait(fence, arg);

        case SYNC_IOC_MERGE:
                return sync_fence_ioctl_merge(fence, arg);

        case SYNC_IOC_FENCE_INFO:
                return sync_fence_ioctl_fence_info(fence, arg);

        default:
                return -ENOTTY;
        }
}

#ifdef CONFIG_DEBUG_FS

static const char *sync_status_str(int status)
{
        if (status > 0)
                return "signaled";
        else if (status == 0)
                return "active";
        else
                return "error";
}

static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
{
        int status = pt->status;

        seq_printf(s, "  %s%spt %s",
                   fence ? pt->parent->name : "",
                   fence ? "_" : "",
                   sync_status_str(status));
        if (pt->status) {
                struct timeval tv = ktime_to_timeval(pt->timestamp);

                seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
        }

        if (pt->parent->ops->timeline_value_str &&
            pt->parent->ops->pt_value_str) {
                char value[64];

                pt->parent->ops->pt_value_str(pt, value, sizeof(value));
                seq_printf(s, ": %s", value);
                if (fence) {
                        pt->parent->ops->timeline_value_str(pt->parent, value,
                                                            sizeof(value));
                        seq_printf(s, " / %s", value);
                }
        } else if (pt->parent->ops->print_pt) {
                seq_printf(s, ": ");
                pt->parent->ops->print_pt(s, pt);
        }

        seq_printf(s, "\n");
}

static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
{
        struct list_head *pos;
        unsigned long flags;

        seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);

        if (obj->ops->timeline_value_str) {
                char value[64];

                obj->ops->timeline_value_str(obj, value, sizeof(value));
                seq_printf(s, ": %s", value);
        } else if (obj->ops->print_obj) {
                seq_printf(s, ": ");
                obj->ops->print_obj(s, obj);
        }

        seq_printf(s, "\n");

        spin_lock_irqsave(&obj->child_list_lock, flags);
        list_for_each(pos, &obj->child_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, child_list);

                sync_print_pt(s, pt, false);
        }
        spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
{
        struct list_head *pos;
        unsigned long flags;

        seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
                   sync_status_str(fence->status));

        list_for_each(pos, &fence->pt_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, pt_list);

                sync_print_pt(s, pt, true);
        }

        spin_lock_irqsave(&fence->waiter_list_lock, flags);
        list_for_each(pos, &fence->waiter_list_head) {
                struct sync_fence_waiter *waiter =
                        container_of(pos, struct sync_fence_waiter,
                                     waiter_list);

                seq_printf(s, "waiter %pF\n", waiter->callback);
        }
        spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
}

static int sync_debugfs_show(struct seq_file *s, void *unused)
{
        unsigned long flags;
        struct list_head *pos;

        seq_puts(s, "objs:\n--------------\n");

        spin_lock_irqsave(&sync_timeline_list_lock, flags);
        list_for_each(pos, &sync_timeline_list_head) {
                struct sync_timeline *obj =
                        container_of(pos, struct sync_timeline,
                                     sync_timeline_list);

                sync_print_obj(s, obj);
                seq_puts(s, "\n");
        }
        spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

        seq_puts(s, "fences:\n--------------\n");

        spin_lock_irqsave(&sync_fence_list_lock, flags);
        list_for_each(pos, &sync_fence_list_head) {
                struct sync_fence *fence =
                        container_of(pos, struct sync_fence, sync_fence_list);

                sync_print_fence(s, fence);
                seq_puts(s, "\n");
        }
        spin_unlock_irqrestore(&sync_fence_list_lock, flags);
        return 0;
}

static int sync_debugfs_open(struct inode *inode, struct file *file)
{
        return single_open(file, sync_debugfs_show, inode->i_private);
}

static const struct file_operations sync_debugfs_fops = {
        .open = sync_debugfs_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static __init int sync_debugfs_init(void)
{
        debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
        return 0;
}
late_initcall(sync_debugfs_init);

#define DUMP_CHUNK 256
static char sync_dump_buf[64 * 1024];
static void sync_dump(void)
{
        struct seq_file s = {
                .buf = sync_dump_buf,
                .size = sizeof(sync_dump_buf) - 1,
        };
        int i;

        sync_debugfs_show(&s, NULL);

        for (i = 0; i < s.count; i += DUMP_CHUNK) {
                if ((s.count - i) > DUMP_CHUNK) {
                        char c = s.buf[i + DUMP_CHUNK];

                        s.buf[i + DUMP_CHUNK] = 0;
                        pr_cont("%s", s.buf + i);
                        s.buf[i + DUMP_CHUNK] = c;
                } else {
                        s.buf[s.count] = 0;
                        pr_cont("%s", s.buf + i);
                }
        }
}
#else
static void sync_dump(void)
{
}
#endif