#ifndef _ASM_X86_FTRACE_H
#define _ASM_X86_FTRACE_H

#ifdef __ASSEMBLY__

	/* skip is set if the stack was already partially adjusted */
	.macro MCOUNT_SAVE_FRAME skip=0
	/*
	 * We add enough stack to save all regs.
	 */
	subq $(SS+8-\skip), %rsp
	movq %rax, RAX(%rsp)
	movq %rcx, RCX(%rsp)
	movq %rdx, RDX(%rsp)
	movq %rsi, RSI(%rsp)
	movq %rdi, RDI(%rsp)
	movq %r8, R8(%rsp)
	movq %r9, R9(%rsp)
	/* Move RIP to its proper location */
	movq SS+8(%rsp), %rdx
	movq %rdx, RIP(%rsp)
	.endm

	.macro MCOUNT_RESTORE_FRAME skip=0
	movq R9(%rsp), %r9
	movq R8(%rsp), %r8
	movq RDI(%rsp), %rdi
	movq RSI(%rsp), %rsi
	movq RDX(%rsp), %rdx
	movq RCX(%rsp), %rcx
	movq RAX(%rsp), %rax
	addq $(SS+8-\skip), %rsp
	.endm
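
	/*
	 * Illustrative only (not part of this header): the mcount/fentry
	 * trampolines in entry_64.S bracket their bodies with these
	 * macros, roughly:
	 *
	 *	MCOUNT_SAVE_FRAME
	 *	movq RIP(%rsp), %rdi		# return address of the call
	 *	subq $MCOUNT_INSN_SIZE, %rdi	# back up to the call site
	 *	call *ftrace_trace_function
	 *	MCOUNT_RESTORE_FRAME
	 *	retq
	 */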

#endif /* __ASSEMBLY__ */

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CC_USING_FENTRY
# define MCOUNT_ADDR ((long)(__fentry__))
#else
# define MCOUNT_ADDR ((long)(mcount))
#endif
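
/*
 * With -mfentry (CC_USING_FENTRY) the compiler calls __fentry__ as the
 * very first instruction of a function, before the stack frame is set
 * up; otherwise mcount is called after the prologue.  Either way the
 * call site is a 5-byte near call (0xe8 + rel32), which is what
 * MCOUNT_INSN_SIZE below describes.
 */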
#define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */

#ifdef CONFIG_DYNAMIC_FTRACE
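/*
 * With dynamic ftrace, the x86 ftrace_caller trampoline passes the
 * active struct ftrace_ops to callbacks, so they get the full
 * (ip, parent_ip, ops, regs) callback signature; that is what
 * ARCH_SUPPORTS_FTRACE_OPS advertises.
 */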
#define ARCH_SUPPORTS_FTRACE_OPS 1
#endif

#ifndef __ASSEMBLY__
extern void mcount(void);
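
/*
 * Non-zero while ftrace is live-patching call sites; the int3 trap
 * handler uses it to decide whether a breakpoint may belong to ftrace.
 */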
extern atomic_t modifying_ftrace_code;
extern void __fentry__(void);

static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
	/*
	 * addr is the address of the mcount call instruction.
	 * recordmcount does the necessary offset calculation.
	 */
	return addr;
}

#ifdef CONFIG_DYNAMIC_FTRACE

struct dyn_arch_ftrace {
	/* No extra data needed for x86 */
};
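
/*
 * Called from the int3 handler while call sites are being patched: if
 * the breakpoint landed on an ftrace call site, regs->ip is advanced
 * past the (partially rewritten) call instruction and non-zero is
 * returned so the trap is not reported further.
 */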
int ftrace_int3_handler(struct pt_regs *regs);

#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_FUNCTION_TRACER */

#if !defined(__ASSEMBLY__) && !defined(COMPILE_OFFSETS)

#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_IA32_EMULATION)
#include <asm/compat.h>

/*
 * Because ia32 syscalls do not map to x86_64 syscall numbers,
 * this screws up the trace output when tracing an ia32 task.
 * Instead of reporting bogus syscalls, just do not trace them.
 *
 * If the user really wants these, then they should use the
 * raw syscall tracepoints with filtering.
 */
#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS 1
static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
{
	if (is_compat_task())
		return true;
	return false;
}
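/*
 * The generic syscall tracing code checks this predicate (gated by
 * ARCH_TRACE_IGNORE_COMPAT_SYSCALLS) before recording sys_enter/sys_exit
 * events, so compat tasks simply produce no syscall trace events.
 */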
#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_IA32_EMULATION */
#endif /* !__ASSEMBLY__ && !COMPILE_OFFSETS */

#endif /* _ASM_X86_FTRACE_H */