/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2008 Shaohua Li <shaohua.li@intel.com>
 *
 * For licencing details, see COPYING.
 *
 * Defines low-level handling of mcount calls when the kernel
 * is compiled with the -pg flag. When using dynamic ftrace, the
 * mcount call-sites get patched lazily with NOP till they are
 * enabled. All code mutation routines here take effect atomically.
 */
14 #include <linux/uaccess.h>
15 #include <linux/ftrace.h>
17 #include <asm/cacheflush.h>
18 #include <asm/patch.h>
20 /* In IA64, each function will be added below two bundles with -pg option */
21 static unsigned char __attribute__((aligned(8)))
22 ftrace_orig_code
[MCOUNT_INSN_SIZE
] = {
23 0x02, 0x40, 0x31, 0x10, 0x80, 0x05, /* alloc r40=ar.pfs,12,8,0 */
24 0xb0, 0x02, 0x00, 0x00, 0x42, 0x40, /* mov r43=r0;; */
25 0x05, 0x00, 0xc4, 0x00, /* mov r42=b0 */
26 0x11, 0x48, 0x01, 0x02, 0x00, 0x21, /* mov r41=r1 */
27 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, /* nop.i 0x0 */
28 0x08, 0x00, 0x00, 0x50 /* br.call.sptk.many b0 = _mcount;; */
31 struct ftrace_orig_insn
{
32 u64 dummy1
, dummy2
, dummy3
;
40 /* mcount stub will be converted below for nop */
41 static unsigned char ftrace_nop_code
[MCOUNT_INSN_SIZE
] = {
42 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MII] nop.m 0x0 */
43 0x30, 0x00, 0x00, 0x60, 0x00, 0x00, /* mov r3=ip */
44 0x00, 0x00, 0x04, 0x00, /* nop.i 0x0 */
45 0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0x0 */
46 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* nop.x 0x0;; */
47 0x00, 0x00, 0x04, 0x00
50 static unsigned char *ftrace_nop_replace(void)
52 return ftrace_nop_code
;
56 * mcount stub will be converted below for call
57 * Note: Just the last instruction is changed against nop
59 static unsigned char __attribute__((aligned(8)))
60 ftrace_call_code
[MCOUNT_INSN_SIZE
] = {
61 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MII] nop.m 0x0 */
62 0x30, 0x00, 0x00, 0x60, 0x00, 0x00, /* mov r3=ip */
63 0x00, 0x00, 0x04, 0x00, /* nop.i 0x0 */
64 0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0x0 */
65 0xff, 0xff, 0xff, 0xff, 0x7f, 0x00, /* brl.many .;;*/
66 0xf8, 0xff, 0xff, 0xc8
69 struct ftrace_call_insn
{
81 static unsigned char *ftrace_call_replace(unsigned long ip
, unsigned long addr
)
83 struct ftrace_call_insn
*code
= (void *)ftrace_call_code
;
84 unsigned long offset
= addr
- (ip
+ 0x10);
86 code
->imm39_l
= offset
>> 24;
87 code
->imm39_h
= offset
>> 40;
88 code
->imm20
= offset
>> 4;
89 code
->i
= offset
>> 63;
90 return ftrace_call_code
;
94 ftrace_modify_code(unsigned long ip
, unsigned char *old_code
,
95 unsigned char *new_code
, int do_check
)
97 unsigned char replaced
[MCOUNT_INSN_SIZE
];
101 * We are paranoid about modifying text, as if a bug was to happen, it
102 * could cause us to read or write to someplace that could cause harm.
103 * Carefully read and modify the code with probe_kernel_*(), and make
104 * sure what we read is what we expected it to be before modifying it.
110 /* read the text we want to modify */
111 if (probe_kernel_read(replaced
, (void *)ip
, MCOUNT_INSN_SIZE
))
114 /* Make sure it is what we expect it to be */
115 if (memcmp(replaced
, old_code
, MCOUNT_INSN_SIZE
) != 0)
119 /* replace the text with the new text */
120 if (probe_kernel_write(((void *)ip
), new_code
, MCOUNT_INSN_SIZE
))
122 flush_icache_range(ip
, ip
+ MCOUNT_INSN_SIZE
);
127 static int ftrace_make_nop_check(struct dyn_ftrace
*rec
, unsigned long addr
)
129 unsigned char __attribute__((aligned(8))) replaced
[MCOUNT_INSN_SIZE
];
130 unsigned long ip
= rec
->ip
;
132 if (probe_kernel_read(replaced
, (void *)ip
, MCOUNT_INSN_SIZE
))
134 if (rec
->flags
& FTRACE_FL_CONVERTED
) {
135 struct ftrace_call_insn
*call_insn
, *tmp_call
;
137 call_insn
= (void *)ftrace_call_code
;
138 tmp_call
= (void *)replaced
;
139 call_insn
->imm39_l
= tmp_call
->imm39_l
;
140 call_insn
->imm39_h
= tmp_call
->imm39_h
;
141 call_insn
->imm20
= tmp_call
->imm20
;
142 call_insn
->i
= tmp_call
->i
;
143 if (memcmp(replaced
, ftrace_call_code
, MCOUNT_INSN_SIZE
) != 0)
147 struct ftrace_orig_insn
*call_insn
, *tmp_call
;
149 call_insn
= (void *)ftrace_orig_code
;
150 tmp_call
= (void *)replaced
;
151 call_insn
->sign
= tmp_call
->sign
;
152 call_insn
->imm20
= tmp_call
->imm20
;
153 if (memcmp(replaced
, ftrace_orig_code
, MCOUNT_INSN_SIZE
) != 0)
159 int ftrace_make_nop(struct module
*mod
,
160 struct dyn_ftrace
*rec
, unsigned long addr
)
165 ret
= ftrace_make_nop_check(rec
, addr
);
168 new = ftrace_nop_replace();
169 return ftrace_modify_code(rec
->ip
, NULL
, new, 0);
172 int ftrace_make_call(struct dyn_ftrace
*rec
, unsigned long addr
)
174 unsigned long ip
= rec
->ip
;
175 unsigned char *old
, *new;
177 old
= ftrace_nop_replace();
178 new = ftrace_call_replace(ip
, addr
);
179 return ftrace_modify_code(ip
, old
, new, 1);
182 /* in IA64, _mcount can't directly call ftrace_stub. Only jump is ok */
183 int ftrace_update_ftrace_func(ftrace_func_t func
)
186 unsigned long addr
= ((struct fnptr
*)ftrace_call
)->ip
;
188 if (func
== ftrace_stub
)
190 ip
= ((struct fnptr
*)func
)->ip
;
192 ia64_patch_imm64(addr
+ 2, ip
);
194 flush_icache_range(addr
, addr
+ 16);
198 /* run from kstop_machine */
199 int __init
ftrace_dyn_arch_init(void)