/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"
static struct trace_array *irqsoff_trace __read_mostly;
static int tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_SPINLOCK(max_trace_lock);
enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};

static int trace_type __read_mostly;

static int save_lat_flag;
#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif
#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif
/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this does not
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp unsigned long max_sequence;
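
/*
 * Illustrative scenario (a sketch derived from the code below, not
 * original comment text): CPU A starts a measurement and saves
 * max_sequence in its per-cpu data->critical_sequence.  CPU B then
 * records a new maximum, bumps max_sequence and may print it to a
 * serial console, disturbing CPU A's section.  When CPU A finishes,
 * check_critical_timing() sees that its saved sequence no longer
 * matches and discards the disturbed measurement.
 */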
#ifdef CONFIG_FUNCTION_TRACER
/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return;

	local_save_flags(flags);
	/* slight chance to get a false positive on tracing_cpu */
	if (!irqs_disabled_flags(flags))
		return;

	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, ip, parent_ip, flags, preempt_count());

	atomic_dec(&data->disabled);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = irqsoff_tracer_call,
};
#endif /* CONFIG_FUNCTION_TRACER */
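
/*
 * Note: while trace_ops is registered (see start_irqsoff_tracer()
 * below), the ftrace core invokes irqsoff_tracer_call() on entry to
 * every traced kernel function; the per-cpu tracing_cpu test above
 * keeps that fast path cheap on CPUs not inside a critical section.
 */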
/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tracing_max_latency)
			return 0;
	}
	return 1;
}
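
/*
 * For example: with tracing_thresh unset (0), a delta is recorded only
 * if it is strictly greater than the current tracing_max_latency; with
 * tracing_thresh set, every delta at or above the threshold is
 * recorded, independent of the running maximum.
 */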
static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	unsigned long latency, t0, t1;
	cycle_t T0, T1, delta;
	unsigned long flags;
	int pc;

	/*
	 * usecs conversion is slow so we try to delay the conversion
	 * as long as possible:
	 */
	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	local_save_flags(flags);

	pc = preempt_count();

	if (!report_latency(delta))
		goto out;

	spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(delta))
		goto out_unlock;

	trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);

	latency = nsecs_to_usecs(delta);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	tracing_max_latency = delta;
	t0 = nsecs_to_usecs(T0);
	t1 = nsecs_to_usecs(T1);

	data->critical_end = parent_ip;

	update_max_tr_single(tr, current, cpu);

	max_sequence++;

out_unlock:
	spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}
static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (likely(!tracer_enabled))
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = tr->data[cpu];

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	data->critical_start = parent_ip ? : ip;

	local_save_flags(flags);

	trace_function(tr, ip, parent_ip, flags, preempt_count());

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}
static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled)
		return;

	data = tr->data[cpu];

	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	local_save_flags(flags);
	trace_function(tr, ip, parent_ip, flags, preempt_count());
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}
/*
 * start and stop critical timings, used to pause the measurement
 * while in idle:
 */
void start_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);
void stop_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);
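
/*
 * Illustrative caller (a sketch of the expected idle-loop usage;
 * safe_halt() stands in for an arch-specific primitive and is not
 * referenced by this file):
 *
 *	stop_critical_timings();
 *	safe_halt();
 *	start_critical_timings();
 *
 * This keeps time spent halted with irqs disabled from being reported
 * as an irqs-off latency.
 */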
#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs:
 */

void early_boot_irqs_off(void)
{
}

void early_boot_irqs_on(void)
{
}

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}
/*
 * We are only interested in hardirq on/off events:
 */
void trace_hardirqs_on(void)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

void trace_hardirqs_on_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

void trace_hardirqs_off_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */
#ifdef CONFIG_PREEMPT_TRACER
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	if (preempt_trace())
		stop_critical_timing(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	if (preempt_trace())
		start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */
static void start_irqsoff_tracer(struct trace_array *tr)
{
	register_ftrace_function(&trace_ops);
	if (tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;
}

static void stop_irqsoff_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	unregister_ftrace_function(&trace_ops);
}
static void __irqsoff_tracer_init(struct trace_array *tr)
{
	save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
	trace_flags |= TRACE_ITER_LATENCY_FMT;

	tracing_max_latency = 0;
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();
	tracing_reset_online_cpus(tr);
	start_irqsoff_tracer(tr);
}
static void irqsoff_tracer_reset(struct trace_array *tr)
{
	stop_irqsoff_tracer(tr);

	if (!save_lat_flag)
		trace_flags &= ~TRACE_ITER_LATENCY_FMT;
}
static void irqsoff_tracer_start(struct trace_array *tr)
{
	tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}
#ifdef CONFIG_IRQSOFF_TRACER
static int irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}
static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_irqsoff,
#endif
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif
#ifdef CONFIG_PREEMPT_TRACER
static int preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptoff,
#endif
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif
#if defined(CONFIG_IRQSOFF_TRACER) && \
	defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptirqsoff,
#endif
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif
__init static int init_irqsoff_tracer(void)
{
	register_irqsoff(irqsoff_tracer);
	register_preemptoff(preemptoff_tracer);
	register_preemptirqsoff(preemptirqsoff_tracer);

	return 0;
}
device_initcall(init_irqsoff_tracer);
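
/*
 * Typical use from userspace (a sketch, assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *	echo irqsoff > /sys/kernel/debug/tracing/current_tracer
 *	cat /sys/kernel/debug/tracing/tracing_max_latency
 *	cat /sys/kernel/debug/tracing/trace
 */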