/*
 * Ftrace header.  For implementation details beyond the random comments
 * scattered below, see: Documentation/trace/ftrace-design.txt
 */

#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H

#include <linux/trace_clock.h>
#include <linux/kallsyms.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/ptrace.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/ftrace.h>
/*
 * If the arch supports passing the variable contents of
 * function_trace_op as the third parameter back from the
 * mcount call, then the arch should define this as 1.
 */
#ifndef ARCH_SUPPORTS_FTRACE_OPS
#define ARCH_SUPPORTS_FTRACE_OPS 0
#endif
/*
 * If the arch's mcount caller does not support all of ftrace's
 * features, then it must call an indirect function that
 * does, or at least does enough to prevent any unwelcome side effects.
 */
#if !ARCH_SUPPORTS_FTRACE_OPS
# define FTRACE_FORCE_LIST_FUNC 1
#else
# define FTRACE_FORCE_LIST_FUNC 0
#endif
/* Main tracing buffer and events set up */
#ifdef CONFIG_TRACING
void trace_init(void);
#else
static inline void trace_init(void) { }
#endif

struct module;
struct ftrace_hash;
#ifdef CONFIG_FUNCTION_TRACER

extern int ftrace_enabled;
extern int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void __user *buffer, size_t *lenp,
		     loff_t *ppos);

struct ftrace_ops;

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
			      struct ftrace_ops *op, struct pt_regs *regs);

ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
/*
 * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
 * set in the flags member.
 * PER_CPU, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION_SAFE, STUB and
 * IPMODIFY are a kind of attribute flags which can be set only before
 * registering the ftrace_ops, and cannot be modified while registered.
 * Changing those attribute flags after registering ftrace_ops will
 * cause unexpected results.
 *
 * ENABLED - set/unset when ftrace_ops is registered/unregistered
 * DYNAMIC - set when ftrace_ops is registered to denote dynamically
 *           allocated ftrace_ops which need special care
 * PER_CPU - set manually by the ftrace_ops user to denote that the
 *           ftrace_ops can be controlled by the following calls:
 *             ftrace_function_local_enable
 *             ftrace_function_local_disable
 * SAVE_REGS - The ftrace_ops wants regs saved at each function called
 *            and passed to the callback. If this flag is set, but the
 *            architecture does not support passing regs
 *            (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the
 *            ftrace_ops will fail to register, unless the next flag
 *            is set.
 * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
 *            handler can handle an arch that does not save regs
 *            (the handler tests if regs == NULL), then it can set
 *            this flag instead. It will not fail registering the
 *            ftrace_ops, but the regs field will be NULL if the arch
 *            does not support passing regs to the handler.
 *            Note, if this flag is set, the SAVE_REGS flag will
 *            automatically get set upon registering the ftrace_ops,
 *            if the arch supports it.
 * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace
 *            infrastructure that the callback has its own recursion
 *            protection. If it does not set this, then the ftrace
 *            infrastructure will add recursion protection for the caller.
 * STUB   - The ftrace_ops is just a placeholder.
 * INITIALIZED - The ftrace_ops has already been initialized (the first
 *            time register_ftrace_function() is called, it will
 *            initialize the ops)
 * DELETED - The ops are being deleted, do not let them be registered again.
 * ADDING  - The ops is in the process of being added.
 * REMOVING - The ops is in the process of being removed.
 * MODIFYING - The ops is in the process of changing its filter functions.
 * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code.
 *            The arch specific code sets this flag when it allocated a
 *            trampoline. This lets the arch know that it can update the
 *            trampoline in case the callback function changes.
 *            The ftrace_ops trampoline can be set by the ftrace users, and
 *            in such cases the arch must not modify it. Only the arch ftrace
 *            core code should set this flag.
 * IPMODIFY - The ops can modify the IP register. This can only be set with
 *            SAVE_REGS. If another ops with this flag set is already
 *            registered for any of the functions that this ops will be
 *            registered for, then this ops will fail to register or
 *            set_filter_ip.
 * PID     - Is affected by set_ftrace_pid (allows filtering on those pids)
 */
enum {
	FTRACE_OPS_FL_ENABLED			= 1 << 0,
	FTRACE_OPS_FL_DYNAMIC			= 1 << 1,
	FTRACE_OPS_FL_PER_CPU			= 1 << 2,
	FTRACE_OPS_FL_SAVE_REGS			= 1 << 3,
	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= 1 << 4,
	FTRACE_OPS_FL_RECURSION_SAFE		= 1 << 5,
	FTRACE_OPS_FL_STUB			= 1 << 6,
	FTRACE_OPS_FL_INITIALIZED		= 1 << 7,
	FTRACE_OPS_FL_DELETED			= 1 << 8,
	FTRACE_OPS_FL_ADDING			= 1 << 9,
	FTRACE_OPS_FL_REMOVING			= 1 << 10,
	FTRACE_OPS_FL_MODIFYING			= 1 << 11,
	FTRACE_OPS_FL_ALLOC_TRAMP		= 1 << 12,
	FTRACE_OPS_FL_IPMODIFY			= 1 << 13,
	FTRACE_OPS_FL_PID			= 1 << 14,
	FTRACE_OPS_FL_RCU			= 1 << 15,
};
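/*
 * Example (an illustrative sketch, not part of this header): a callback
 * registered with FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED must tolerate a
 * NULL regs argument on arches without DYNAMIC_FTRACE_WITH_REGS.  The
 * names my_regs_callback and my_regs_ops are hypothetical.
 *
 *	static void my_regs_callback(unsigned long ip, unsigned long parent_ip,
 *				     struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		if (regs)
 *			;	// full register state available here
 *		// else: this arch could not pass regs; use ip/parent_ip only
 *	}
 *
 *	static struct ftrace_ops my_regs_ops = {
 *		.func	= my_regs_callback,
 *		.flags	= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED,
 *	};
 */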
#ifdef CONFIG_DYNAMIC_FTRACE
/* The hash used to know what functions callbacks trace */
struct ftrace_ops_hash {
	struct ftrace_hash		*notrace_hash;
	struct ftrace_hash		*filter_hash;
	struct mutex			regex_lock;
};
#endif
/*
 * Note, ftrace_ops can be referenced outside of RCU protection, unless
 * the RCU flag is set.  If ftrace_ops is allocated and not part of kernel
 * core data, unregistering it will perform a scheduling on all CPUs
 * to make sure there are no more users.  Depending on the load of the
 * system that may take a bit of time.
 *
 * Any private data added must also take care not to be freed too early,
 * and if private data is added to a ftrace_ops that is in core code,
 * the user of the ftrace_ops must perform a schedule_on_each_cpu()
 * before freeing it.
 */
struct ftrace_ops {
	ftrace_func_t			func;
	struct ftrace_ops		*next;
	unsigned long			flags;
	void				*private;
	ftrace_func_t			saved_func;
	int __percpu			*disabled;
#ifdef CONFIG_DYNAMIC_FTRACE
	struct ftrace_ops_hash		local_hash;
	struct ftrace_ops_hash		*func_hash;
	struct ftrace_ops_hash		old_hash;
	unsigned long			trampoline;
	unsigned long			trampoline_size;
#endif
};
/*
 * Type of the current tracing.
 */
enum ftrace_tracing_type_t {
	FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
	FTRACE_TYPE_RETURN,	/* Hook the return of the function */
};

/* Current tracing type, default is FTRACE_TYPE_ENTER */
extern enum ftrace_tracing_type_t ftrace_tracing_type;
/*
 * The ftrace_ops must be static and should also
 * be read_mostly.  These functions do modify read_mostly variables
 * so use them sparingly.  Never free an ftrace_ops or modify the
 * next pointer after it has been registered.  Even after unregistering
 * it, the next pointer may still be used internally.
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);
void clear_ftrace_function(void);
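/*
 * Example (an illustrative sketch, not part of this header): registering
 * a minimal function-trace callback.  The names my_trace_func and my_ops
 * are hypothetical, and error handling is elided.
 *
 *	static void my_trace_func(unsigned long ip, unsigned long parent_ip,
 *				  struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		// called on every traced function; keep it fast and re-entrant
 *	}
 *
 *	static struct ftrace_ops my_ops = {	// must be static, see above
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	// ... tracing active ...
 *	unregister_ftrace_function(&my_ops);	// never free my_ops after this
 */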
/**
 * ftrace_function_local_enable - enable ftrace_ops on current cpu
 *
 * This function enables tracing on the current cpu by decreasing
 * the per-cpu control variable.
 * It must be called with preemption disabled and only on ftrace_ops
 * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
 */
static inline void ftrace_function_local_enable(struct ftrace_ops *ops)
{
	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
		return;

	(*this_cpu_ptr(ops->disabled))--;
}

/**
 * ftrace_function_local_disable - disable ftrace_ops on current cpu
 *
 * This function disables tracing on the current cpu by increasing
 * the per-cpu control variable.
 * It must be called with preemption disabled and only on ftrace_ops
 * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
 */
static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
{
	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
		return;

	(*this_cpu_ptr(ops->disabled))++;
}
/**
 * ftrace_function_local_disabled - returns ftrace_ops disabled value
 *                                  on current cpu
 *
 * This function returns the value of ftrace_ops::disabled on the current
 * cpu.
 * It must be called with preemption disabled and only on ftrace_ops
 * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
 */
static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
{
	WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU));
	return *this_cpu_ptr(ops->disabled);
}
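/*
 * Example (an illustrative sketch): temporarily suppressing a PER_CPU
 * ftrace_ops callback on the local cpu.  my_pcpu_ops is a hypothetical
 * ftrace_ops that was registered with FTRACE_OPS_FL_PER_CPU.
 *
 *	preempt_disable();
 *	ftrace_function_local_disable(&my_pcpu_ops);	// bump this cpu's counter
 *
 *	// ... functions called here will not invoke my_pcpu_ops.func ...
 *
 *	ftrace_function_local_enable(&my_pcpu_ops);	// drop the counter again
 *	preempt_enable();
 */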
extern void ftrace_stub(unsigned long a0, unsigned long a1,
			struct ftrace_ops *op, struct pt_regs *regs);

#else /* !CONFIG_FUNCTION_TRACER */
/*
 * (un)register_ftrace_function must be a macro since the ops parameter
 * must not be evaluated.
 */
#define register_ftrace_function(ops) ({ 0; })
#define unregister_ftrace_function(ops) ({ 0; })
static inline int ftrace_nr_registered_ops(void)
{
	return 0;
}
static inline void clear_ftrace_function(void) { }
static inline void ftrace_kill(void) { }
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_STACK_TRACER

#define STACK_TRACE_ENTRIES 500

struct stack_trace;

extern unsigned stack_trace_index[];
extern struct stack_trace stack_trace_max;
extern unsigned long stack_trace_max_size;
extern arch_spinlock_t stack_trace_max_lock;

extern int stack_tracer_enabled;
void stack_trace_print(void);
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos);
#endif
struct ftrace_func_command {
	struct list_head	list;
	char			*name;
	int			(*func)(struct ftrace_hash *hash,
					char *func, char *cmd,
					char *params, int enable);
};

#ifdef CONFIG_DYNAMIC_FTRACE

int ftrace_arch_code_modify_prepare(void);
int ftrace_arch_code_modify_post_process(void);

struct dyn_ftrace;
enum ftrace_bug_type {
	FTRACE_BUG_UNKNOWN,
	FTRACE_BUG_INIT,
	FTRACE_BUG_NOP,
	FTRACE_BUG_CALL,
	FTRACE_BUG_UPDATE,
};
extern enum ftrace_bug_type ftrace_bug_type;

/*
 * Archs can set this to point to a variable that holds the value that was
 * expected at the call site before calling ftrace_bug().
 */
extern const void *ftrace_expected;

void ftrace_bug(int err, struct dyn_ftrace *rec);
struct seq_file;

struct ftrace_probe_ops {
	void			(*func)(unsigned long ip,
					unsigned long parent_ip,
					void **data);
	int			(*init)(struct ftrace_probe_ops *ops,
					unsigned long ip, void **data);
	void			(*free)(struct ftrace_probe_ops *ops,
					unsigned long ip, void **data);
	int			(*print)(struct seq_file *m,
					 unsigned long ip,
					 struct ftrace_probe_ops *ops,
					 void *data);
};
extern int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
			       void *data);
extern void
unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				 void *data);
extern void
unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
extern void unregister_ftrace_function_probe_all(char *glob);

extern int ftrace_text_reserved(const void *start, const void *end);

extern int ftrace_nr_registered_ops(void);

bool is_ftrace_trampoline(unsigned long addr);
/*
 * The dyn_ftrace record's flags field is split into two parts.
 * The first part, 0 through FTRACE_REF_MAX, is a counter of
 * the number of callbacks that have registered the function that
 * the dyn_ftrace descriptor represents.
 *
 * The second part is a mask:
 *  ENABLED  - the function is being traced
 *  REGS     - the record wants the function to save regs
 *  REGS_EN  - the function is set up to save regs.
 *  IPMODIFY - the record allows for the IP address to be changed.
 *  DISABLED - the record is not ready to be touched yet
 *
 * When a new ftrace_ops is registered and wants a function to save
 * pt_regs, the REGS bit in rec->flags is set.  When the function has been
 * set up to save regs, the REGS_EN flag is set.  Once a function
 * starts saving regs it will do so until all ftrace_ops are removed
 * from tracing that function.
 */
enum {
	FTRACE_FL_ENABLED	= (1UL << 31),
	FTRACE_FL_REGS		= (1UL << 30),
	FTRACE_FL_REGS_EN	= (1UL << 29),
	FTRACE_FL_TRAMP		= (1UL << 28),
	FTRACE_FL_TRAMP_EN	= (1UL << 27),
	FTRACE_FL_IPMODIFY	= (1UL << 26),
	FTRACE_FL_DISABLED	= (1UL << 25),
};

#define FTRACE_REF_MAX_SHIFT	25
#define FTRACE_FL_BITS		7
#define FTRACE_FL_MASKED_BITS	((1UL << FTRACE_FL_BITS) - 1)
#define FTRACE_FL_MASK		(FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT)
#define FTRACE_REF_MAX		((1UL << FTRACE_REF_MAX_SHIFT) - 1)

#define ftrace_rec_count(rec)	((rec)->flags & ~FTRACE_FL_MASK)
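/*
 * Example (an illustrative sketch): with the layout above, the low 25 bits
 * of rec->flags count registered callbacks and the top 7 bits hold the
 * FTRACE_FL_* state.  A record no ftrace_ops cares about anymore satisfies:
 *
 *	if (!ftrace_rec_count(rec) && !(rec->flags & FTRACE_FL_ENABLED))
 *		;	// the call site can be turned back into a plain nop
 */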
struct dyn_ftrace {
	unsigned long		ip; /* address of mcount call-site */
	unsigned long		flags;
	struct dyn_arch_ftrace	arch;
};

int ftrace_force_update(void);
int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
			 int remove, int reset);
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
		      int len, int reset);
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
		       int len, int reset);
void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
void ftrace_free_filter(struct ftrace_ops *ops);
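/*
 * Example (an illustrative sketch, not part of this header): restricting
 * a hypothetical my_ops to a subset of functions.  Filters should be set
 * up before register_ftrace_function() so they apply from the start.
 *
 *	char match[] = "vfs_*";
 *
 *	// reset any old filter, then trace only functions matching vfs_*
 *	ftrace_set_filter(&my_ops, match, strlen(match), 1);
 *	// additionally pin one exact call site by address (0 = add, 0 = no reset)
 *	ftrace_set_filter_ip(&my_ops, (unsigned long)some_func, 0, 0);
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 *	ftrace_free_filter(&my_ops);	// release the hashes when done
 */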
int register_ftrace_command(struct ftrace_func_command *cmd);
int unregister_ftrace_command(struct ftrace_func_command *cmd);

enum {
	FTRACE_UPDATE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_START_FUNC_RET		= (1 << 3),
	FTRACE_STOP_FUNC_RET		= (1 << 4),
};
/*
 * The FTRACE_UPDATE_* enum is used to pass information back
 * from the ftrace_update_record() and ftrace_test_record()
 * functions. These are called by the code update routines
 * to find out what is to be done for a given function.
 *
 *  IGNORE      - The function is already what we want it to be
 *  MAKE_CALL   - Start tracing the function
 *  MODIFY_CALL - Change what the call site calls (e.g. to start or
 *                stop saving regs for the function)
 *  MAKE_NOP    - Stop tracing the function
 */
enum {
	FTRACE_UPDATE_IGNORE,
	FTRACE_UPDATE_MAKE_CALL,
	FTRACE_UPDATE_MODIFY_CALL,
	FTRACE_UPDATE_MAKE_NOP,
};
enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_NOTRACE	= (1 << 1),
	FTRACE_ITER_PRINTALL	= (1 << 2),
	FTRACE_ITER_DO_HASH	= (1 << 3),
	FTRACE_ITER_HASH	= (1 << 4),
	FTRACE_ITER_ENABLED	= (1 << 5),
};
void arch_ftrace_update_code(int command);

struct ftrace_rec_iter;

struct ftrace_rec_iter *ftrace_rec_iter_start(void);
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);

#define for_ftrace_rec_iter(iter)		\
	for (iter = ftrace_rec_iter_start();	\
	     iter;				\
	     iter = ftrace_rec_iter_next(iter))
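/*
 * Example (an illustrative sketch): how arch code can walk every mcount
 * record with the iterator, using ftrace_test_record() (declared below)
 * to ask the core what each call site needs:
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for_ftrace_rec_iter(iter) {
 *		rec = ftrace_rec_iter_record(iter);
 *		if (ftrace_test_record(rec, 1) == FTRACE_UPDATE_IGNORE)
 *			continue;
 *		// otherwise patch the instruction at rec->ip as required
 *	}
 */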
int ftrace_update_record(struct dyn_ftrace *rec, int enable);
int ftrace_test_record(struct dyn_ftrace *rec, int enable);
void ftrace_run_stop_machine(int command);
unsigned long ftrace_location(unsigned long ip);
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);

extern ftrace_func_t ftrace_trace_function;

int ftrace_regex_open(struct ftrace_ops *ops, int flag,
		      struct inode *inode, struct file *file);
ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
			    size_t cnt, loff_t *ppos);
ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
			     size_t cnt, loff_t *ppos);
int ftrace_regex_release(struct inode *inode, struct file *file);

void __init
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);
/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void);
extern void ftrace_replace_code(int enable);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_regs_caller(void);
extern void ftrace_call(void);
extern void ftrace_regs_call(void);
extern void mcount_call(void);

void ftrace_modify_all_code(int command);

#ifndef FTRACE_ADDR
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
#endif

#ifndef FTRACE_GRAPH_ADDR
#define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller)
#endif

#ifndef FTRACE_REGS_ADDR
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
# define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
#else
# define FTRACE_REGS_ADDR FTRACE_ADDR
#endif
#endif
/*
 * If an arch would like functions that are only traced
 * by the function graph tracer to jump directly to its own
 * trampoline, then it can define FTRACE_GRAPH_TRAMP_ADDR
 * to be the address to jump to.
 */
#ifndef FTRACE_GRAPH_TRAMP_ADDR
#define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0)
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
extern int ftrace_disable_ftrace_graph_caller(void);
#else
static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
#endif
/**
 * ftrace_make_nop - convert code into nop
 * @mod: module structure if called by module load initialization
 * @rec: the mcount call site record
 * @addr: the address that the call site should be calling
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_nop(struct module *mod,
			   struct dyn_ftrace *rec, unsigned long addr);
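/*
 * Illustrative sketch of the read/compare/write discipline described
 * above, for an imaginary arch.  ftrace_call_replace() and
 * arch_ftrace_nop() are hypothetical helpers, and MCOUNT_INSN_SIZE is
 * the arch's call instruction size; real arches often need stricter
 * text-patching primitives than probe_kernel_write().
 *
 *	int ftrace_make_nop(struct module *mod,
 *			    struct dyn_ftrace *rec, unsigned long addr)
 *	{
 *		unsigned char old[MCOUNT_INSN_SIZE], cur[MCOUNT_INSN_SIZE];
 *
 *		ftrace_call_replace(old, rec->ip, addr);  // expected call insn
 *		if (probe_kernel_read(cur, (void *)rec->ip, MCOUNT_INSN_SIZE))
 *			return -EFAULT;		// could not read the site
 *		if (memcmp(cur, old, MCOUNT_INSN_SIZE))
 *			return -EINVAL;		// site is not what we expect
 *		if (probe_kernel_write((void *)rec->ip, arch_ftrace_nop(),
 *				       MCOUNT_INSN_SIZE))
 *			return -EPERM;		// write failed
 *		return 0;
 *	}
 */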
/**
 * ftrace_make_call - convert a nop call site into a call to addr
 * @rec: the mcount call site record
 * @addr: the address that the call site should call
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a nop
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/**
 * ftrace_modify_call - convert from one addr to another (no nop)
 * @rec: the mcount call site record
 * @old_addr: the address expected to be currently called to
 * @addr: the address to change to
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @old_addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
			      unsigned long addr);
#else
/* Should never be called */
static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
				     unsigned long addr)
{
	return -EINVAL;
}
#endif
/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);

extern int skip_trace(unsigned long ip);
extern void ftrace_module_init(struct module *mod);
extern void ftrace_module_enable(struct module *mod);
extern void ftrace_release_mod(struct module *mod);

extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
#else /* CONFIG_DYNAMIC_FTRACE */
static inline int skip_trace(unsigned long ip) { return 0; }
static inline int ftrace_force_update(void) { return 0; }
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_module_init(struct module *mod) { }
static inline void ftrace_module_enable(struct module *mod) { }
static inline void ftrace_release_mod(struct module *mod) { }
static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
{
	return -EINVAL;
}
static inline __init int unregister_ftrace_command(char *cmd_name)
{
	return -EINVAL;
}
static inline int ftrace_text_reserved(const void *start, const void *end)
{
	return 0;
}
static inline unsigned long ftrace_location(unsigned long ip)
{
	return 0;
}
/*
 * Again, users of functions that have ftrace_ops may not
 * have them defined when ftrace is not enabled, but these
 * functions may still be called. Use a macro instead of an inline.
 */
#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)
static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
					  size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
					   size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline int
ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }

static inline bool is_ftrace_trampoline(unsigned long addr)
{
	return false;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
/* totally disable ftrace - cannot re-enable after this */
void ftrace_kill(void);

static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = 0;
#endif
}
/*
 * Ftrace disable/restore without lock.  Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
 * disable/restore.
 */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	int saved_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;
	return saved_ftrace_enabled;
#else
	return 0;
#endif
}

static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = enabled;
#endif
}
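/*
 * Example (an illustrative sketch): the intended pairing, with the caller
 * providing its own serialization around the pair as noted above.
 *
 *	int saved = __ftrace_enabled_save();	// ftrace off, old state kept
 *
 *	// ... region that must run with ftrace_enabled cleared ...
 *
 *	__ftrace_enabled_restore(saved);	// put the old state back
 */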
/* All archs should have this, but we define it for consistency */
#ifndef ftrace_return_address0
# define ftrace_return_address0 __builtin_return_address(0)
#endif

/* Archs may use other ways for ADDR1 and beyond */
#ifndef ftrace_return_address
# ifdef CONFIG_FRAME_POINTER
#  define ftrace_return_address(n) __builtin_return_address(n)
# else
#  define ftrace_return_address(n) 0UL
# endif
#endif

#define CALLER_ADDR0 ((unsigned long)ftrace_return_address0)
#define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))
#define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2))
#define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3))
#define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4))
#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))
static inline unsigned long get_lock_parent_ip(void)
{
	unsigned long addr = CALLER_ADDR0;

	if (!in_lock_functions(addr))
		return addr;
	addr = CALLER_ADDR1;
	if (!in_lock_functions(addr))
		return addr;
	return CALLER_ADDR2;
}
#ifdef CONFIG_IRQSOFF_TRACER
  extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
  extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
#else
  static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { }
  static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
#endif
#ifdef CONFIG_PREEMPT_TRACER
  extern void trace_preempt_on(unsigned long a0, unsigned long a1);
  extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
/*
 * Use defines instead of static inlines because some arches will generate
 * code for the CALLER_ADDR macros, when we really want these to be real nops.
 */
# define trace_preempt_on(a0, a1) do { } while (0)
# define trace_preempt_off(a0, a1) do { } while (0)
#endif
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
#else
static inline void ftrace_init(void) { }
#endif
/*
 * Structure that defines an entry function trace.
 */
struct ftrace_graph_ent {
	unsigned long func; /* Current function */
	int depth;
};

/*
 * Structure that defines a return function trace.
 */
struct ftrace_graph_ret {
	unsigned long func; /* Current function */
	unsigned long long calltime;
	unsigned long long rettime;
	/* Number of functions that overran the depth limit for current task */
	unsigned long overrun;
	int depth;
};

/* Type of the callback handlers for tracing function graph */
typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* for init task */
#define INIT_FTRACE_GRAPH		.ret_stack = NULL,

/*
 * Stack of return addresses for functions
 * of a thread.
 * Used in struct thread_info
 */
struct ftrace_ret_stack {
	unsigned long ret;
	unsigned long func;
	unsigned long long calltime;
	unsigned long long subtime;
	unsigned long fp;
};
/*
 * Primary handler of a function return.
 * It relies on ftrace_return_to_handler.
 * Defined in entry_32/64.S
 */
extern void return_to_handler(void);

extern int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer);
/*
 * Sometimes we don't want to trace a function with the function
 * graph tracer but we still want it to be traced by the usual function
 * tracer if the function graph tracer is not configured.
 */
#define __notrace_funcgraph		notrace

#define FTRACE_NOTRACE_DEPTH 65536
#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32
extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
				 trace_func_graph_ent_t entryfunc);
extern bool ftrace_graph_is_dead(void);
extern void ftrace_graph_stop(void);

/* The current handlers in use */
extern trace_func_graph_ret_t ftrace_graph_return;
extern trace_func_graph_ent_t ftrace_graph_entry;

extern void unregister_ftrace_graph(void);
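/*
 * Example (an illustrative sketch, not part of this header): wiring up
 * graph tracing.  The handler names are hypothetical; the entry handler
 * returns nonzero to trace the function, 0 to skip it.
 *
 *	static int my_graph_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return trace->depth < 3;	// only trace shallow calls
 *	}
 *
 *	static void my_graph_return(struct ftrace_graph_ret *trace)
 *	{
 *		// trace->rettime - trace->calltime is the function's duration
 *	}
 *
 *	register_ftrace_graph(my_graph_return, my_graph_entry);
 *	...
 *	unregister_ftrace_graph();
 */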
extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);

static inline int task_curr_ret_stack(struct task_struct *t)
{
	return t->curr_ret_stack;
}

static inline void pause_graph_tracing(void)
{
	atomic_inc(&current->tracing_graph_pause);
}

static inline void unpause_graph_tracing(void)
{
	atomic_dec(&current->tracing_graph_pause);
}
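/*
 * Example (an illustrative sketch): keeping a code region out of the
 * current task's graph trace.  The pause count is an atomic counter, so
 * pause/unpause pairs may nest freely.
 *
 *	pause_graph_tracing();
 *	// ... calls made here are not recorded by the graph tracer ...
 *	unpause_graph_tracing();
 */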
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */

#define __notrace_funcgraph
#define INIT_FTRACE_GRAPH

static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }

static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
					trace_func_graph_ent_t entryfunc)
{
	return -1;
}
static inline void unregister_ftrace_graph(void) { }

static inline int task_curr_ret_stack(struct task_struct *tsk)
{
	return -1;
}

static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_TRACING

/* flags for current->trace */
enum {
	TSK_TRACE_FL_TRACE_BIT	= 0,
	TSK_TRACE_FL_GRAPH_BIT	= 1,
};
enum {
	TSK_TRACE_FL_TRACE	= 1 << TSK_TRACE_FL_TRACE_BIT,
	TSK_TRACE_FL_GRAPH	= 1 << TSK_TRACE_FL_GRAPH_BIT,
};

static inline void set_tsk_trace_trace(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_trace(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline int test_tsk_trace_trace(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_TRACE;
}

static inline void set_tsk_trace_graph(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_graph(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline int test_tsk_trace_graph(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_GRAPH;
}
enum ftrace_dump_mode;

extern enum ftrace_dump_mode ftrace_dump_on_oops;
extern int tracepoint_printk;

extern void disable_trace_on_warning(void);
extern int __disable_trace_on_warning;

#ifdef CONFIG_PREEMPT
#define INIT_TRACE_RECURSION		.trace_recursion = 0,
#endif

#else /* CONFIG_TRACING */
static inline void disable_trace_on_warning(void) { }
#endif /* CONFIG_TRACING */

#ifndef INIT_TRACE_RECURSION
#define INIT_TRACE_RECURSION
#endif

#ifdef CONFIG_FTRACE_SYSCALLS

unsigned long arch_syscall_addr(int nr);

#endif /* CONFIG_FTRACE_SYSCALLS */

#endif /* _LINUX_FTRACE_H */