/*
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/anon_inodes.h>

#include "sync.h"

#define CREATE_TRACE_POINTS
#include "trace/sync.h"

static void sync_fence_signal_pt(struct sync_pt *pt);
static int _sync_pt_has_signaled(struct sync_pt *pt);
static void sync_fence_free(struct kref *kref);
static void sync_dump(void);

static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);

static LIST_HEAD(sync_fence_list_head);
static DEFINE_SPINLOCK(sync_fence_list_lock);

struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
					   int size, const char *name)
{
	struct sync_timeline *obj;
	unsigned long flags;

	if (size < sizeof(struct sync_timeline))
		return NULL;

	obj = kzalloc(size, GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	kref_init(&obj->kref);
	obj->ops = ops;
	strlcpy(obj->name, name, sizeof(obj->name));

	INIT_LIST_HEAD(&obj->child_list_head);
	spin_lock_init(&obj->child_list_lock);

	INIT_LIST_HEAD(&obj->active_list_head);
	spin_lock_init(&obj->active_list_lock);

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	return obj;
}
EXPORT_SYMBOL(sync_timeline_create);
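
/*
 * Illustrative sketch, not part of this driver: a driver embeds struct
 * sync_timeline at the start of its own timeline struct and passes the
 * wrapper's size here.  struct my_timeline, my_timeline_ops and the
 * "value" counter are hypothetical names for this example only.
 *
 *	struct my_timeline {
 *		struct sync_timeline	obj;	<- must be first
 *		u32			value;	<- driver's notion of "now"
 *	};
 *
 *	static struct my_timeline *my_timeline_create(void)
 *	{
 *		return (struct my_timeline *)
 *			sync_timeline_create(&my_timeline_ops,
 *					     sizeof(struct my_timeline),
 *					     "my_timeline");
 *	}
 */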

static void sync_timeline_free(struct kref *kref)
{
	struct sync_timeline *obj =
		container_of(kref, struct sync_timeline, kref);
	unsigned long flags;

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_del(&obj->sync_timeline_list);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	if (obj->ops->release_obj)
		obj->ops->release_obj(obj);

	kfree(obj);
}

void sync_timeline_destroy(struct sync_timeline *obj)
{
	obj->destroyed = true;
	/*
	 * Ensure the timeline is marked as destroyed before
	 * changing the status of the timeline's fences.
	 */
	smp_wmb();

	/*
	 * signal any children that their parent is going away.
	 */
	sync_timeline_signal(obj);

	kref_put(&obj->kref, sync_timeline_free);
}
EXPORT_SYMBOL(sync_timeline_destroy);

static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
{
	unsigned long flags;

	pt->parent = obj;

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_add_tail(&pt->child_list, &obj->child_list_head);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_timeline_remove_pt(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;

	spin_lock_irqsave(&obj->active_list_lock, flags);
	if (!list_empty(&pt->active_list))
		list_del_init(&pt->active_list);
	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	spin_lock_irqsave(&obj->child_list_lock, flags);
	if (!list_empty(&pt->child_list))
		list_del_init(&pt->child_list);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

void sync_timeline_signal(struct sync_timeline *obj)
{
	unsigned long flags;
	LIST_HEAD(signaled_pts);
	struct list_head *pos, *n;

	trace_sync_timeline(obj);

	spin_lock_irqsave(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &obj->active_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, active_list);

		if (_sync_pt_has_signaled(pt)) {
			list_del_init(pos);
			list_add(&pt->signaled_list, &signaled_pts);
			kref_get(&pt->fence->kref);
		}
	}

	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &signaled_pts) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, signaled_list);

		list_del_init(pos);
		sync_fence_signal_pt(pt);
		kref_put(&pt->fence->kref, sync_fence_free);
	}
}
EXPORT_SYMBOL(sync_timeline_signal);
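
/*
 * Illustrative sketch (hypothetical driver code): hardware-completion
 * paths typically advance the driver's private counter and then call
 * sync_timeline_signal(), which re-evaluates every pt on the active list
 * via ops->has_signaled and signals the fences whose pts are now done.
 *
 *	static void my_timeline_inc(struct my_timeline *tl)
 *	{
 *		tl->value++;
 *		sync_timeline_signal(&tl->obj);
 *	}
 */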

struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
{
	struct sync_pt *pt;

	if (size < sizeof(struct sync_pt))
		return NULL;

	pt = kzalloc(size, GFP_KERNEL);
	if (pt == NULL)
		return NULL;

	INIT_LIST_HEAD(&pt->active_list);
	kref_get(&parent->kref);
	sync_timeline_add_pt(parent, pt);

	return pt;
}
EXPORT_SYMBOL(sync_pt_create);

void sync_pt_free(struct sync_pt *pt)
{
	if (pt->parent->ops->free_pt)
		pt->parent->ops->free_pt(pt);

	sync_timeline_remove_pt(pt);

	kref_put(&pt->parent->kref, sync_timeline_free);

	kfree(pt);
}
EXPORT_SYMBOL(sync_pt_free);

/* call with pt->parent->active_list_lock held */
static int _sync_pt_has_signaled(struct sync_pt *pt)
{
	int old_status = pt->status;

	if (!pt->status)
		pt->status = pt->parent->ops->has_signaled(pt);

	if (!pt->status && pt->parent->destroyed)
		pt->status = -ENOENT;

	if (pt->status != old_status)
		pt->timestamp = ktime_get();

	return pt->status;
}

static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
{
	return pt->parent->ops->dup(pt);
}

/* Adds a sync pt to the active queue.  Called when added to a fence */
static void sync_pt_activate(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&obj->active_list_lock, flags);

	err = _sync_pt_has_signaled(pt);
	if (err != 0)
		goto out;

	list_add_tail(&pt->active_list, &obj->active_list_head);

out:
	spin_unlock_irqrestore(&obj->active_list_lock, flags);
}

static int sync_fence_release(struct inode *inode, struct file *file);
static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg);

static const struct file_operations sync_fence_fops = {
	.release = sync_fence_release,
	.poll = sync_fence_poll,
	.unlocked_ioctl = sync_fence_ioctl,
	.compat_ioctl = sync_fence_ioctl,
};

static struct sync_fence *sync_fence_alloc(const char *name)
{
	struct sync_fence *fence;
	unsigned long flags;

	fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
	if (fence == NULL)
		return NULL;

	fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
					 fence, 0);
	if (IS_ERR(fence->file))
		goto err;

	kref_init(&fence->kref);
	strlcpy(fence->name, name, sizeof(fence->name));

	INIT_LIST_HEAD(&fence->pt_list_head);
	INIT_LIST_HEAD(&fence->waiter_list_head);
	spin_lock_init(&fence->waiter_list_lock);

	init_waitqueue_head(&fence->wq);

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	return fence;

err:
	kfree(fence);
	return NULL;
}

/* TODO: implement a create which takes more than one sync_pt */
struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
{
	struct sync_fence *fence;

	if (pt->fence)
		return NULL;

	fence = sync_fence_alloc(name);
	if (fence == NULL)
		return NULL;

	pt->fence = fence;
	list_add(&pt->pt_list, &fence->pt_list_head);
	sync_pt_activate(pt);

	/*
	 * signal the fence in case pt was signaled before
	 * sync_pt_activate(pt) was called
	 */
	sync_fence_signal_pt(pt);

	return fence;
}
EXPORT_SYMBOL(sync_fence_create);
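
/*
 * Illustrative sketch of the usual create-and-export pattern.  The
 * struct my_pt wrapper and the error label are assumptions; only
 * sync_pt_create()/sync_fence_create()/sync_fence_install() come from
 * this file.
 *
 *	int fd = get_unused_fd_flags(O_CLOEXEC);
 *	struct sync_pt *pt;
 *	struct sync_fence *fence;
 *
 *	if (fd < 0)
 *		return fd;
 *
 *	pt = sync_pt_create(&tl->obj, sizeof(struct my_pt));
 *	if (pt == NULL)
 *		goto err_put_fd;
 *
 *	fence = sync_fence_create("my_fence", pt);
 *	if (fence == NULL) {
 *		sync_pt_free(pt);
 *		goto err_put_fd;
 *	}
 *
 *	sync_fence_install(fence, fd);	<- fd now owns the fence's file
 */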

static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *pos;

	list_for_each(pos, &src->pt_list_head) {
		struct sync_pt *orig_pt =
			container_of(pos, struct sync_pt, pt_list);
		struct sync_pt *new_pt = sync_pt_dup(orig_pt);

		if (new_pt == NULL)
			return -ENOMEM;

		new_pt->fence = dst;
		list_add(&new_pt->pt_list, &dst->pt_list_head);
	}

	return 0;
}

static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *src_pos, *dst_pos, *n;

	list_for_each(src_pos, &src->pt_list_head) {
		struct sync_pt *src_pt =
			container_of(src_pos, struct sync_pt, pt_list);
		bool collapsed = false;

		list_for_each_safe(dst_pos, n, &dst->pt_list_head) {
			struct sync_pt *dst_pt =
				container_of(dst_pos, struct sync_pt, pt_list);
			/* collapse two sync_pts on the same timeline
			 * to a single sync_pt that will signal at
			 * the later of the two
			 */
			if (dst_pt->parent == src_pt->parent) {
				if (dst_pt->parent->ops->compare(dst_pt, src_pt)
						== -1) {
					struct sync_pt *new_pt =
						sync_pt_dup(src_pt);
					if (new_pt == NULL)
						return -ENOMEM;

					new_pt->fence = dst;
					list_replace(&dst_pt->pt_list,
						     &new_pt->pt_list);
					sync_pt_free(dst_pt);
				}
				collapsed = true;
				break;
			}
		}

		if (!collapsed) {
			struct sync_pt *new_pt = sync_pt_dup(src_pt);

			if (new_pt == NULL)
				return -ENOMEM;

			new_pt->fence = dst;
			list_add(&new_pt->pt_list, &dst->pt_list_head);
		}
	}

	return 0;
}

static void sync_fence_detach_pts(struct sync_fence *fence)
{
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);

		sync_timeline_remove_pt(pt);
	}
}

static void sync_fence_free_pts(struct sync_fence *fence)
{
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);

		sync_pt_free(pt);
	}
}

struct sync_fence *sync_fence_fdget(int fd)
{
	struct file *file = fget(fd);

	if (file == NULL)
		return NULL;

	if (file->f_op != &sync_fence_fops)
		goto err;

	return file->private_data;

err:
	fput(file);
	return NULL;
}
EXPORT_SYMBOL(sync_fence_fdget);

void sync_fence_put(struct sync_fence *fence)
{
	fput(fence->file);
}
EXPORT_SYMBOL(sync_fence_put);

void sync_fence_install(struct sync_fence *fence, int fd)
{
	fd_install(fd, fence->file);
}
EXPORT_SYMBOL(sync_fence_install);

static int sync_fence_get_status(struct sync_fence *fence)
{
	struct list_head *pos;
	int status = 1;

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
		int pt_status = pt->status;

		if (pt_status < 0) {
			status = pt_status;
			break;
		} else if (status == 1) {
			status = pt_status;
		}
	}

	return status;
}

struct sync_fence *sync_fence_merge(const char *name,
				    struct sync_fence *a, struct sync_fence *b)
{
	struct sync_fence *fence;
	struct list_head *pos;
	int err;

	fence = sync_fence_alloc(name);
	if (fence == NULL)
		return NULL;

	err = sync_fence_copy_pts(fence, a);
	if (err < 0)
		goto err;

	err = sync_fence_merge_pts(fence, b);
	if (err < 0)
		goto err;

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);
		sync_pt_activate(pt);
	}

	/*
	 * signal the fence in case one of its pts was signaled before
	 * it was activated
	 */
	sync_fence_signal_pt(list_first_entry(&fence->pt_list_head,
					      struct sync_pt,
					      pt_list));

	return fence;
err:
	sync_fence_free_pts(fence);
	kfree(fence);
	return NULL;
}
EXPORT_SYMBOL(sync_fence_merge);
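
/*
 * Illustrative sketch: merging is how dependencies accumulate.  Because
 * pts on the same timeline are collapsed to the later one, the merged
 * fence signals only once both inputs have signaled.  "a" and "b" stand
 * for any two valid fences:
 *
 *	struct sync_fence *both = sync_fence_merge("a|b", a, b);
 *
 *	if (both == NULL)
 *		return -ENOMEM;
 */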

static void sync_fence_signal_pt(struct sync_pt *pt)
{
	LIST_HEAD(signaled_waiters);
	struct sync_fence *fence = pt->fence;
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int status;

	status = sync_fence_get_status(fence);

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * this should protect against two threads racing on the signaled
	 * false -> true transition
	 */
	if (status && !fence->status) {
		list_for_each_safe(pos, n, &fence->waiter_list_head)
			list_move(pos, &signaled_waiters);

		fence->status = status;
	} else {
		status = 0;
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	if (status) {
		list_for_each_safe(pos, n, &signaled_waiters) {
			struct sync_fence_waiter *waiter =
				container_of(pos, struct sync_fence_waiter,
					     waiter_list);

			list_del(pos);
			waiter->callback(fence, waiter);
		}
		wake_up(&fence->wq);
	}
}

int sync_fence_wait_async(struct sync_fence *fence,
			  struct sync_fence_waiter *waiter)
{
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&fence->waiter_list_lock, flags);

	if (fence->status) {
		err = fence->status;
		goto out;
	}

	list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
out:
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	return err;
}
EXPORT_SYMBOL(sync_fence_wait_async);
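
/*
 * Illustrative sketch (hypothetical callback and ctx struct): an async
 * waiter embeds a struct sync_fence_waiter, initializes its callback
 * with sync_fence_waiter_init() (from sync.h) and registers it here.
 * A non-zero return means the fence already signaled (or errored), so
 * the callback will never be invoked by the fence.
 *
 *	static void my_done_cb(struct sync_fence *fence,
 *			       struct sync_fence_waiter *waiter)
 *	{
 *		... runs once fence->status becomes non-zero ...
 *	}
 *
 *	sync_fence_waiter_init(&ctx->waiter, my_done_cb);
 *	err = sync_fence_wait_async(fence, &ctx->waiter);
 *	if (err != 0)
 *		my_done_cb(fence, &ctx->waiter);  <- already done; run inline
 */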

int sync_fence_cancel_async(struct sync_fence *fence,
			    struct sync_fence_waiter *waiter)
{
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * Make sure waiter is still in waiter_list because it is possible for
	 * the waiter to be removed from the list while the callback is still
	 * pending.
	 */
	list_for_each_safe(pos, n, &fence->waiter_list_head) {
		struct sync_fence_waiter *list_waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);
		if (list_waiter == waiter) {
			list_del(pos);
			ret = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
	return ret;
}
EXPORT_SYMBOL(sync_fence_cancel_async);

static bool sync_fence_check(struct sync_fence *fence)
{
	/*
	 * Make sure that reads to fence->status are ordered with the
	 * wait queue event triggering
	 */
	smp_rmb();
	return fence->status != 0;
}

int sync_fence_wait(struct sync_fence *fence, long timeout)
{
	int err = 0;
	struct sync_pt *pt;

	trace_sync_wait(fence, 1);
	list_for_each_entry(pt, &fence->pt_list_head, pt_list)
		trace_sync_pt(pt);

	if (timeout > 0) {
		timeout = msecs_to_jiffies(timeout);
		err = wait_event_interruptible_timeout(fence->wq,
						       sync_fence_check(fence),
						       timeout);
	} else if (timeout < 0) {
		err = wait_event_interruptible(fence->wq,
					       sync_fence_check(fence));
	}
	trace_sync_wait(fence, 0);

	if (err < 0)
		return err;

	if (fence->status < 0) {
		pr_info("fence error %d on [%p]\n", fence->status, fence);
		sync_dump();
		return fence->status;
	}

	if (fence->status == 0) {
		if (timeout > 0) {
			pr_info("fence timeout on [%p] after %dms\n", fence,
				jiffies_to_msecs(timeout));
			sync_dump();
		}
		return -ETIME;
	}

	return 0;
}
EXPORT_SYMBOL(sync_fence_wait);
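
/*
 * Illustrative sketch: a blocking wait with a one-second budget.
 * timeout > 0 is in milliseconds, timeout < 0 blocks indefinitely, and
 * timeout == 0 polls, returning -ETIME (without logging) if the fence
 * has not yet signaled:
 *
 *	err = sync_fence_wait(fence, 1000);
 *	if (err == -ETIME)
 *		... not signaled within 1000ms ...
 *	else if (err < 0)
 *		... interrupted, or the fence carries an error status ...
 */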

static void sync_fence_free(struct kref *kref)
{
	struct sync_fence *fence = container_of(kref, struct sync_fence, kref);

	sync_fence_free_pts(fence);

	kfree(fence);
}

static int sync_fence_release(struct inode *inode, struct file *file)
{
	struct sync_fence *fence = file->private_data;
	unsigned long flags;

	/*
	 * We need to remove all ways to access this fence before dropping
	 * our ref.
	 *
	 * start with its membership in the global fence list
	 */
	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_del(&fence->sync_fence_list);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	/*
	 * remove its pts from their parents so that sync_timeline_signal()
	 * can't reference the fence.
	 */
	sync_fence_detach_pts(fence);

	kref_put(&fence->kref, sync_fence_free);

	return 0;
}

static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
{
	struct sync_fence *fence = file->private_data;

	poll_wait(file, &fence->wq, wait);

	/*
	 * Make sure that reads to fence->status are ordered with the
	 * wait queue event triggering
	 */
	smp_rmb();

	if (fence->status == 1)
		return POLLIN;
	else if (fence->status < 0)
		return POLLERR;
	return 0;
}

static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
{
	__s32 value;

	if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
		return -EFAULT;

	return sync_fence_wait(fence, value);
}

static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
{
	int fd = get_unused_fd_flags(O_CLOEXEC);
	int err;
	struct sync_fence *fence2, *fence3;
	struct sync_merge_data data;

	if (fd < 0)
		return fd;

	if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fd;
	}

	fence2 = sync_fence_fdget(data.fd2);
	if (fence2 == NULL) {
		err = -ENOENT;
		goto err_put_fd;
	}

	data.name[sizeof(data.name) - 1] = '\0';
	fence3 = sync_fence_merge(data.name, fence, fence2);
	if (fence3 == NULL) {
		err = -ENOMEM;
		goto err_put_fence2;
	}

	data.fence = fd;
	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fence3;
	}

	sync_fence_install(fence3, fd);
	sync_fence_put(fence2);
	return 0;

err_put_fence3:
	sync_fence_put(fence3);

err_put_fence2:
	sync_fence_put(fence2);

err_put_fd:
	put_unused_fd(fd);
	return err;
}

static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
{
	struct sync_pt_info *info = data;
	int ret;

	if (size < sizeof(struct sync_pt_info))
		return -ENOMEM;

	info->len = sizeof(struct sync_pt_info);

	if (pt->parent->ops->fill_driver_data) {
		ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
							size - sizeof(*info));
		if (ret < 0)
			return ret;

		info->len += ret;
	}

	strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
	strlcpy(info->driver_name, pt->parent->ops->driver_name,
		sizeof(info->driver_name));
	info->status = pt->status;
	info->timestamp_ns = ktime_to_ns(pt->timestamp);

	return info->len;
}

static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
					unsigned long arg)
{
	struct sync_fence_info_data *data;
	struct list_head *pos;
	__u32 size;
	__u32 len = 0;
	int ret;

	if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
		return -EFAULT;

	if (size < sizeof(struct sync_fence_info_data))
		return -EINVAL;

	if (size > 4096)
		size = 4096;

	data = kzalloc(size, GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	strlcpy(data->name, fence->name, sizeof(data->name));
	data->status = fence->status;
	len = sizeof(struct sync_fence_info_data);

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);

		ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);

		if (ret < 0)
			goto out;

		len += ret;
	}

	data->len = len;

	if (copy_to_user((void __user *)arg, data, len))
		ret = -EFAULT;
	else
		ret = 0;

out:
	kfree(data);

	return ret;
}

static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct sync_fence *fence = file->private_data;

	switch (cmd) {
	case SYNC_IOC_WAIT:
		return sync_fence_ioctl_wait(fence, arg);

	case SYNC_IOC_MERGE:
		return sync_fence_ioctl_merge(fence, arg);

	case SYNC_IOC_FENCE_INFO:
		return sync_fence_ioctl_fence_info(fence, arg);

	default:
		return -ENOTTY;
	}
}
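
/*
 * Illustrative sketch (userspace side; fd1 and fd2 are hypothetical
 * fence fds): these ioctls are the raw interface that userspace
 * libraries such as Android's libsync wrap.
 *
 *	__s32 timeout_ms = 1000;
 *	int err = ioctl(fd1, SYNC_IOC_WAIT, &timeout_ms);
 *
 *	struct sync_merge_data data = { .fd2 = fd2 };
 *	strncpy(data.name, "merged", sizeof(data.name));
 *	err = ioctl(fd1, SYNC_IOC_MERGE, &data);
 *	<- on success, data.fence holds the new merged fence fd
 */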

#ifdef CONFIG_DEBUG_FS
static const char *sync_status_str(int status)
{
	if (status > 0)
		return "signaled";
	else if (status == 0)
		return "active";
	else
		return "error";
}
*s
, struct sync_pt
*pt
, bool fence
)
865 int status
= pt
->status
;
867 seq_printf(s
, " %s%spt %s",
868 fence
? pt
->parent
->name
: "",
870 sync_status_str(status
));
872 struct timeval tv
= ktime_to_timeval(pt
->timestamp
);
874 seq_printf(s
, "@%ld.%06ld", tv
.tv_sec
, tv
.tv_usec
);
877 if (pt
->parent
->ops
->timeline_value_str
&&
878 pt
->parent
->ops
->pt_value_str
) {
881 pt
->parent
->ops
->pt_value_str(pt
, value
, sizeof(value
));
882 seq_printf(s
, ": %s", value
);
884 pt
->parent
->ops
->timeline_value_str(pt
->parent
, value
,
886 seq_printf(s
, " / %s", value
);
888 } else if (pt
->parent
->ops
->print_pt
) {
890 pt
->parent
->ops
->print_pt(s
, pt
);

static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);

	if (obj->ops->timeline_value_str) {
		char value[64];

		obj->ops->timeline_value_str(obj, value, sizeof(value));
		seq_printf(s, ": %s", value);
	} else if (obj->ops->print_obj) {
		seq_puts(s, ": ");
		obj->ops->print_obj(s, obj);
	}

	seq_puts(s, "\n");

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_for_each(pos, &obj->child_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, child_list);
		sync_print_pt(s, pt, false);
	}
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
		   sync_status_str(fence->status));

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);
		sync_print_pt(s, pt, true);
	}

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	list_for_each(pos, &fence->waiter_list_head) {
		struct sync_fence_waiter *waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);

		seq_printf(s, "waiter %pF\n", waiter->callback);
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
}

static int sync_debugfs_show(struct seq_file *s, void *unused)
{
	unsigned long flags;
	struct list_head *pos;

	seq_puts(s, "objs:\n--------------\n");

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_for_each(pos, &sync_timeline_list_head) {
		struct sync_timeline *obj =
			container_of(pos, struct sync_timeline,
				     sync_timeline_list);

		sync_print_obj(s, obj);
		seq_puts(s, "\n");
	}
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	seq_puts(s, "fences:\n--------------\n");

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_for_each(pos, &sync_fence_list_head) {
		struct sync_fence *fence =
			container_of(pos, struct sync_fence, sync_fence_list);

		sync_print_fence(s, fence);
		seq_puts(s, "\n");
	}
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
	return 0;
}

static int sync_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, sync_debugfs_show, inode->i_private);
}

static const struct file_operations sync_debugfs_fops = {
	.open           = sync_debugfs_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};

static __init int sync_debugfs_init(void)
{
	debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
	return 0;
}
late_initcall(sync_debugfs_init);

#define DUMP_CHUNK 256
static char sync_dump_buf[64 * 1024];
static void sync_dump(void)
{
	struct seq_file s = {
		.buf = sync_dump_buf,
		.size = sizeof(sync_dump_buf) - 1,
	};
	int i;

	sync_debugfs_show(&s, NULL);

	/* print the buffer in DUMP_CHUNK-sized pieces */
	for (i = 0; i < s.count; i += DUMP_CHUNK) {
		if ((s.count - i) > DUMP_CHUNK) {
			char c = s.buf[i + DUMP_CHUNK];

			s.buf[i + DUMP_CHUNK] = 0;
			pr_cont("%s", s.buf + i);
			s.buf[i + DUMP_CHUNK] = c;
		} else {
			s.buf[s.count] = 0;
			pr_cont("%s", s.buf + i);
		}
	}
}
#else
static void sync_dump(void)
{
}
#endif