Merge tag 'locking-urgent-2020-12-27' of git://git.kernel.org/pub/scm/linux/kernel...
[linux/fpc-iii.git] / arch / arm / include / asm / ftrace.h
blob48ec1d0337da71a0e9f0bfaf98d0e3c30be0e707
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ARM_FTRACE
#define _ASM_ARM_FTRACE

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
#define ARCH_SUPPORTS_FTRACE_OPS 1
#endif

#ifdef CONFIG_FUNCTION_TRACER
#define MCOUNT_ADDR		((unsigned long)(__gnu_mcount_nc))
#define MCOUNT_INSN_SIZE	4 /* sizeof mcount call */

#ifndef __ASSEMBLY__
extern void __gnu_mcount_nc(void);

#ifdef CONFIG_DYNAMIC_FTRACE
/* No ARM-specific per-record state is needed; the struct stays empty. */
struct dyn_arch_ftrace {
};

static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
	/* With Thumb-2, the recorded addresses have the lsb set */
	return addr & ~1;
}
#endif

#endif

#endif

#ifndef __ASSEMBLY__

#if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND)
/*
 * return_address uses walk_stackframe to do its work.  If both
 * CONFIG_FRAME_POINTER=y and CONFIG_ARM_UNWIND=y walk_stackframe uses unwind
 * information.  For this to work in the function tracer many functions would
 * have to be marked with __notrace.  So for now just depend on
 * !CONFIG_ARM_UNWIND.
 */

void *return_address(unsigned int);

#else

/* Stub: without frame pointers there is no way to walk the stack here. */
static inline void *return_address(unsigned int level)
{
	return NULL;
}

#endif

#define ftrace_return_address(n) return_address(n)

#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME

/*
 * Match a syscall tracepoint name against a kallsyms symbol, translating
 * the ARM wrapper symbols to the generic syscall names they implement.
 */
static inline bool arch_syscall_match_sym_name(const char *sym,
					       const char *name)
{
	if (!strcmp(sym, "sys_mmap2"))
		sym = "sys_mmap_pgoff";
	else if (!strcmp(sym, "sys_statfs64_wrapper"))
		sym = "sys_statfs64";
	else if (!strcmp(sym, "sys_fstatfs64_wrapper"))
		sym = "sys_fstatfs64";
	else if (!strcmp(sym, "sys_arm_fadvise64_64"))
		sym = "sys_fadvise64_64";

	/* Ignore case since sym may start with "SyS" instead of "sys" */
	return !strcasecmp(sym, name);
}

#endif /* ifndef __ASSEMBLY__ */

#endif /* _ASM_ARM_FTRACE */