kernel/trace/trace_kdb.c
// SPDX-License-Identifier: GPL-2.0
/*
 * kdb helper for dumping the ftrace buffer
 *
 * Copyright (C) 2010 Jason Wessel <jason.wessel@windriver.com>
 *
 * ftrace_dump_buf based on ftrace_dump:
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 */
#include <linux/init.h>
#include <linux/kgdb.h>
#include <linux/kdb.h>
#include <linux/ftrace.h>

#include "trace.h"
#include "trace_output.h"

static struct trace_iterator iter;
static struct ring_buffer_iter *buffer_iter[CONFIG_NR_CPUS];
static void ftrace_dump_buf(int skip_entries, long cpu_file)
{
        struct trace_array *tr;
        unsigned int old_userobj;
        int cnt = 0, cpu;

        tr = iter.tr;

        old_userobj = tr->trace_flags;

        /* don't look at user memory in panic mode */
        tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

        kdb_printf("Dumping ftrace buffer:\n");
        if (skip_entries)
                kdb_printf("(skipping %d entries)\n", skip_entries);

        /* reset all but tr, trace, and overruns */
        memset(&iter.seq, 0,
               sizeof(struct trace_iterator) -
               offsetof(struct trace_iterator, seq));
        iter.iter_flags |= TRACE_FILE_LAT_FMT;
        iter.pos = -1;

        /* Set up a ring buffer read iterator for each requested CPU */
        if (cpu_file == RING_BUFFER_ALL_CPUS) {
                for_each_tracing_cpu(cpu) {
                        iter.buffer_iter[cpu] =
                        ring_buffer_read_prepare(iter.trace_buffer->buffer,
                                                 cpu, GFP_ATOMIC);
                        ring_buffer_read_start(iter.buffer_iter[cpu]);
                        tracing_iter_reset(&iter, cpu);
                }
        } else {
                iter.cpu_file = cpu_file;
                iter.buffer_iter[cpu_file] =
                        ring_buffer_read_prepare(iter.trace_buffer->buffer,
                                                 cpu_file, GFP_ATOMIC);
                ring_buffer_read_start(iter.buffer_iter[cpu_file]);
                tracing_iter_reset(&iter, cpu_file);
        }

        /* Print entries until the buffers are exhausted or kdb is interrupted */
        while (trace_find_next_entry_inc(&iter)) {
                if (!cnt)
                        kdb_printf("---------------------------------\n");
                cnt++;

                if (!skip_entries) {
                        print_trace_line(&iter);
                        trace_printk_seq(&iter.seq);
                } else {
                        skip_entries--;
                }

                if (KDB_FLAG(CMD_INTERRUPT))
                        goto out;
        }

        if (!cnt)
                kdb_printf("   (ftrace buffer empty)\n");
        else
                kdb_printf("---------------------------------\n");

out:
        tr->trace_flags = old_userobj;

        for_each_tracing_cpu(cpu) {
                if (iter.buffer_iter[cpu]) {
                        ring_buffer_read_finish(iter.buffer_iter[cpu]);
                        iter.buffer_iter[cpu] = NULL;
                }
        }
}
/*
 * kdb_ftdump - Dump the ftrace log buffer
 */
static int kdb_ftdump(int argc, const char **argv)
{
        int skip_entries = 0;
        long cpu_file;
        char *cp;
        int cnt;
        int cpu;

        if (argc > 2)
                return KDB_ARGCOUNT;

        if (argc) {
                skip_entries = simple_strtol(argv[1], &cp, 0);
                if (*cp)
                        skip_entries = 0;
        }

        if (argc == 2) {
                cpu_file = simple_strtol(argv[2], &cp, 0);
                if (*cp || cpu_file >= NR_CPUS || cpu_file < 0 ||
                    !cpu_online(cpu_file))
                        return KDB_BADINT;
        } else {
                cpu_file = RING_BUFFER_ALL_CPUS;
        }

        /* Route printk output through kdb while we dump */
        kdb_trap_printk++;

        trace_init_global_iter(&iter);
        iter.buffer_iter = buffer_iter;

        /* Stop per-CPU trace recording while the buffers are being read */
        for_each_tracing_cpu(cpu) {
                atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
        }

        /*
         * A negative skip_entries means skip all but the last entries
         * (e.g. -10 with 500 buffered entries skips the first 490).
         */
        if (skip_entries < 0) {
                if (cpu_file == RING_BUFFER_ALL_CPUS)
                        cnt = trace_total_entries(NULL);
                else
                        cnt = trace_total_entries_cpu(NULL, cpu_file);
                skip_entries = max(cnt + skip_entries, 0);
        }

        ftrace_dump_buf(skip_entries, cpu_file);

        for_each_tracing_cpu(cpu) {
                atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
        }

        kdb_trap_printk--;

        return 0;
}
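
/*
 * Usage sketch (illustrative, derived from the argument parsing in
 * kdb_ftdump() above), run from the kdb prompt:
 *
 *   ftdump          dump every buffered entry on all CPUs
 *   ftdump 100      skip the first 100 entries, then dump the rest
 *   ftdump -25 2    dump only the last 25 entries recorded on CPU 2
 */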
static __init int kdb_ftrace_register(void)
{
        kdb_register_flags("ftdump", kdb_ftdump, "[skip_#entries] [cpu]",
                           "Dump ftrace log; -skip dumps last #entries", 0,
                           KDB_ENABLE_ALWAYS_SAFE);
        return 0;
}

late_initcall(kdb_ftrace_register);