Linux 4.19.168: kernel/trace/trace_irqsoff.c
// SPDX-License-Identifier: GPL-2.0
/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>

#include "trace.h"

#include <trace/events/preemptirq.h>
#if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
static struct trace_array	*irqsoff_trace __read_mostly;
static int			tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_RAW_SPINLOCK(max_trace_lock);

enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};

static int trace_type __read_mostly;

static int save_flags;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);
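
/*
 * preempt_trace() and irq_trace() report whether the current event is
 * one this tracer cares about: a preempt-off section (non-zero preempt
 * count) and/or an irqs-off section, according to trace_type.
 */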
#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(int pc)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && pc);
}
#else
# define preempt_trace(pc) (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set);
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
static inline int irqsoff_display_graph(struct trace_array *tr, int set)
{
	return -EINVAL;
}
# define is_graph(tr) false
#endif
/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp	unsigned long max_sequence;
#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 *            incremented.
 *         0 if the trace is to be ignored, and data->disabled
 *            is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
			   struct trace_array_cpu **data,
			   unsigned long *flags)
{
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return 0;

	local_save_flags(*flags);
	/*
	 * Slight chance to get a false positive on tracing_cpu,
	 * although I'm starting to think there isn't a chance.
	 * Leave this for now just to be paranoid.
	 */
	if (!irqs_disabled_flags(*flags) && !preempt_count())
		return 0;

	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);

	if (likely(disabled == 1))
		return 1;

	atomic_dec(&(*data)->disabled);

	return 0;
}
/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	trace_function(tr, ip, parent_ip, flags, preempt_count());

	atomic_dec(&data->disabled);
}
#endif /* CONFIG_FUNCTION_TRACER */
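
/*
 * irqsoff_display_graph() switches between flat function output and
 * function-graph output: it stops the tracer, clears the per-cpu
 * state and the recorded max latency, and restarts in the requested
 * mode.
 */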
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set)
{
	int cpu;

	if (!(is_graph(tr) ^ set))
		return 0;

	stop_irqsoff_tracer(irqsoff_trace, !set);

	for_each_possible_cpu(cpu)
		per_cpu(tracing_cpu, cpu) = 0;

	tr->max_latency = 0;
	tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);

	return start_irqsoff_tracer(irqsoff_trace, set);
}
static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int ret;
	int pc;

	if (ftrace_graph_ignore_func(trace))
		return 0;
	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions. But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	if (!func_prolog_dec(tr, &data, &flags))
		return 0;

	pc = preempt_count();
	ret = __trace_graph_entry(tr, trace, flags, pc);
	atomic_dec(&data->disabled);

	return ret;
}
static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	ftrace_graph_addr_finish(trace);

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	pc = preempt_count();
	__trace_graph_return(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
}
static void irqsoff_trace_open(struct trace_iterator *iter)
{
	if (is_graph(iter->tr))
		graph_trace_open(iter);
}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
			    TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_ABS_TIME | \
			    TRACE_GRAPH_PRINT_DURATION)
static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph(iter->tr))
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
	struct trace_array *tr = irqsoff_trace;

	if (is_graph(tr))
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (is_graph(tr))
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
}
#else
#define __trace_function trace_function

#ifdef CONFIG_FUNCTION_TRACER
static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	return -1;
}
#endif

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
static void irqsoff_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#else
static void irqsoff_print_header(struct seq_file *s)
{
	trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, u64 delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return false;
	} else {
		if (delta <= tr->max_latency)
			return false;
	}
	return true;
}
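
/*
 * check_critical_timing() runs when a critical section ends: it
 * computes the section's duration and, if it is a new maximum (or
 * above tracing_thresh), records the trace as the new worst case,
 * serialized by max_trace_lock.
 */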
static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	u64 T0, T1, delta;
	unsigned long flags;
	int pc;

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	local_save_flags(flags);

	pc = preempt_count();

	if (!report_latency(tr, delta))
		goto out;

	raw_spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(tr, delta))
		goto out_unlock;

	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
	/* Skip 5 functions to get to the irq/preempt enable function */
	__trace_stack(tr, flags, 5, pc);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	data->critical_end = parent_ip;

	if (likely(!is_tracing_stopped())) {
		tr->max_latency = delta;
		update_max_tr_single(tr, current, cpu);
	}

	max_sequence++;

out_unlock:
	raw_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}
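
/*
 * start_critical_timing() marks the start of an irqs-off and/or
 * preempt-off section on this CPU: it stamps preempt_timestamp and
 * critical_start so that a later stop_critical_timing() can compute
 * the latency.
 */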
static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = per_cpu_ptr(tr->trace_buffer.data, cpu);

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	data->critical_start = parent_ip ? : ip;

	local_save_flags(flags);

	__trace_function(tr, ip, parent_ip, flags, pc);

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}
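
/*
 * stop_critical_timing() marks the end of the section: it clears the
 * per-cpu tracing flag and hands off to check_critical_timing() to
 * decide whether this was a new maximum.
 */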
static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	data = per_cpu_ptr(tr->trace_buffer.data, cpu);

	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	local_save_flags(flags);
	__trace_function(tr, ip, parent_ip, flags, pc);
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}
/*
 * start_critical_timings() and stop_critical_timings() bracket code
 * (such as the idle loop) whose irqs-off time should not be counted
 * toward the measured latency.
 */
void start_critical_timings(void)
{
	int pc = preempt_count();

	if (preempt_trace(pc) || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
	int pc = preempt_count();

	if (preempt_trace(pc) || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);
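
/*
 * register_irqsoff_function()/unregister_irqsoff_function() attach or
 * detach the (graph) function tracer, depending on the "function"
 * trace option and whether graph mode is requested.
 */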
#ifdef CONFIG_FUNCTION_TRACER
static bool function_enabled;

static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&irqsoff_graph_return,
					    &irqsoff_graph_entry);
	else
		ret = register_ftrace_function(tr->ops);

	if (!ret)
		function_enabled = true;

	return ret;
}

static void unregister_irqsoff_function(struct trace_array *tr, int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph();
	else
		unregister_ftrace_function(tr->ops);

	function_enabled = false;
}

static int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	if (!(mask & TRACE_ITER_FUNCTION))
		return 0;

	if (set)
		register_irqsoff_function(tr, is_graph(tr), 1);
	else
		unregister_irqsoff_function(tr, is_graph(tr));
	return 1;
}
#else
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	return 0;
}
static void unregister_irqsoff_function(struct trace_array *tr, int graph) { }
static inline int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */
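
/*
 * Called whenever a trace option flag changes; routes "function" and
 * "display-graph" changes to the handlers above.
 */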
static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	if (irqsoff_function_set(tr, mask, set))
		return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (mask & TRACE_ITER_DISPLAY_GRAPH)
		return irqsoff_display_graph(tr, set);
#endif

	return trace_keep_overwrite(tracer, mask, set);
}
static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
	int ret;

	ret = register_irqsoff_function(tr, graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_irqsoff_function(tr, graph);
}
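
/*
 * Common init/reset shared by the irqsoff, preemptoff and
 * preemptirqsoff flavours below; only one of them may be active at a
 * time (tracked by irqsoff_busy).
 */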
static bool irqsoff_busy;

static int __irqsoff_tracer_init(struct trace_array *tr)
{
	if (irqsoff_busy)
		return -EBUSY;

	save_flags = tr->trace_flags;

	/* non overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

	tr->max_latency = 0;
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();

	ftrace_init_array_ops(tr, irqsoff_tracer_call);

	/* Only toplevel instance supports graph tracing */
	if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
				      is_graph(tr))))
		printk(KERN_ERR "failed to start irqsoff tracer\n");

	irqsoff_busy = true;
	return 0;
}

static void __irqsoff_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

	stop_irqsoff_tracer(tr, is_graph(tr));

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
	ftrace_reset_array_ops(tr);

	irqsoff_busy = false;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
	tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}
#ifdef CONFIG_IRQSOFF_TRACER
/*
 * We are only interested in hardirq on/off events:
 */
void tracer_hardirqs_on(unsigned long a0, unsigned long a1)
{
	unsigned int pc = preempt_count();

	if (!preempt_trace(pc) && irq_trace())
		stop_critical_timing(a0, a1, pc);
}

void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
{
	unsigned int pc = preempt_count();

	if (!preempt_trace(pc) && irq_trace())
		start_critical_timing(a0, a1, pc);
}

static int irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	return __irqsoff_tracer_init(tr);
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
	__irqsoff_tracer_reset(tr);
}

static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_irqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
#endif /* CONFIG_IRQSOFF_TRACER */
#ifdef CONFIG_PREEMPT_TRACER
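/*
 * For the preemptoff tracer, only preempt disable/enable events are
 * of interest (mirroring the hardirq handlers above):
 */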
void tracer_preempt_on(unsigned long a0, unsigned long a1)
{
	int pc = preempt_count();

	if (preempt_trace(pc) && !irq_trace())
		stop_critical_timing(a0, a1, pc);
}

void tracer_preempt_off(unsigned long a0, unsigned long a1)
{
	int pc = preempt_count();

	if (preempt_trace(pc) && !irq_trace())
		start_critical_timing(a0, a1, pc);
}

static int preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static void preemptoff_tracer_reset(struct trace_array *tr)
{
	__irqsoff_tracer_reset(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= preemptoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
#endif /* CONFIG_PREEMPT_TRACER */
#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static void preemptirqsoff_tracer_reset(struct trace_array *tr)
{
	__irqsoff_tracer_reset(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= preemptirqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptirqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
#endif
__init static int init_irqsoff_tracer(void)
{
#ifdef CONFIG_IRQSOFF_TRACER
	register_tracer(&irqsoff_tracer);
#endif
#ifdef CONFIG_PREEMPT_TRACER
	register_tracer(&preemptoff_tracer);
#endif
#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
	register_tracer(&preemptirqsoff_tracer);
#endif

	return 0;
}
core_initcall(init_irqsoff_tracer);
#endif /* IRQSOFF_TRACER || PREEMPTOFF_TRACER */
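
/*
 * Usage sketch (not part of the original file): with
 * CONFIG_IRQSOFF_TRACER enabled, the tracer is driven through
 * tracefs, conventionally mounted at /sys/kernel/debug/tracing:
 *
 *	# cd /sys/kernel/debug/tracing
 *	# echo 0 > tracing_max_latency
 *	# echo irqsoff > current_tracer
 *	# ... run the workload ...
 *	# cat tracing_max_latency
 *	# cat trace
 *
 * "preemptoff" and "preemptirqsoff" (when configured) are selected
 * the same way via current_tracer.
 */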