// SPDX-License-Identifier: GPL-2.0
/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks go to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>

#include <trace/syscall.h>

#include <asm/set_memory.h>
#include <asm/kprobes.h>
#include <asm/ftrace.h>
#include <asm/nops.h>

#ifdef CONFIG_DYNAMIC_FTRACE

int ftrace_arch_code_modify_prepare(void)
{
	set_all_modules_text_rw();
	return 0;
}

int ftrace_arch_code_modify_post_process(void)
{
	set_all_modules_text_ro();
	return 0;
}

union ftrace_code_union {
	char code[MCOUNT_INSN_SIZE];
	struct {
		unsigned char op;
		int offset;
	} __attribute__((packed));
};

static int ftrace_calc_offset(long ip, long addr)
{
	return (int)(addr - ip);
}

static unsigned char *
ftrace_text_replace(unsigned char op, unsigned long ip, unsigned long addr)
{
	static union ftrace_code_union calc;

	calc.op		= op;
	calc.offset	= ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);

	return calc.code;
}

static unsigned char *
ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	return ftrace_text_replace(0xe8, ip, addr);
}

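/*
 * Illustrative sketch of the encoding built above (addresses made up,
 * only the arithmetic matters): 0xe8 is the x86 near-call opcode and
 * the following rel32 is relative to the *next* instruction, which is
 * why the offset is computed from ip + MCOUNT_INSN_SIZE:
 *
 *	ip   = 0xffffffff81000010		call site being patched
 *	addr = 0xffffffff81000100		call target
 *	rel32 = addr - (ip + 5) = 0xeb
 *	bytes written at ip: e8 eb 00 00 00	(call addr)
 */
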
static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}

static unsigned long text_ip_addr(unsigned long ip)
{
	/*
	 * On x86_64, kernel text mappings are mapped read-only, so we use
	 * the kernel identity mapping instead of the kernel text mapping
	 * to modify the kernel text.
	 *
	 * For 32bit kernels, these mappings are the same and we can use
	 * the kernel identity mapping to modify code.
	 */
	if (within(ip, (unsigned long)_text, (unsigned long)_etext))
		ip = (unsigned long)__va(__pa_symbol(ip));

	return ip;
}

static const unsigned char *ftrace_nop_replace(void)
{
	return ideal_nops[NOP_ATOMIC5];
}

static int
ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
			  unsigned const char *new_code)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];

	ftrace_expected = old_code;

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug was to happen, it
	 * could cause us to read or write to someplace that could cause harm.
	 * Carefully read and modify the code with probe_kernel_*(), and make
	 * sure what we read is what we expected it to be before modifying it.
	 */

	/* read the text we want to modify */
	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;

	ip = text_ip_addr(ip);

	/* replace the text with the new text */
	if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
		return -EPERM;

	sync_core();

	return 0;
}

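/*
 * Worked example of the two buffers handled above (illustrative; the
 * exact 5-byte NOP comes from ideal_nops[NOP_ATOMIC5] and depends on
 * the CPU, 0f 1f 44 00 00 being a common choice):
 *
 *	old_code: e8 xx xx xx xx	call to the mcount/fentry handler
 *	new_code: 0f 1f 44 00 00	5-byte atomic NOP
 *
 * probe_kernel_read() plus memcmp() verify the site really holds
 * old_code before probe_kernel_write() installs new_code.
 */
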
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned const char *new, *old;
	unsigned long ip = rec->ip;

	old = ftrace_call_replace(ip, addr);
	new = ftrace_nop_replace();

	/*
	 * On boot up, and when modules are loaded, the MCOUNT_ADDR
	 * is converted to a nop, and will never become MCOUNT_ADDR
	 * again. This code is either running before SMP (on boot up)
	 * or before the code will ever be executed (module load).
	 * We do not want to use the breakpoint version in this case,
	 * just modify the code directly.
	 */
	if (addr == MCOUNT_ADDR)
		return ftrace_modify_code_direct(rec->ip, old, new);

	ftrace_expected = NULL;

	/* Normal cases use add_brk_on_nop */
	WARN_ONCE(1, "invalid use of ftrace_make_nop");
	return -EINVAL;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned const char *new, *old;
	unsigned long ip = rec->ip;

	old = ftrace_nop_replace();
	new = ftrace_call_replace(ip, addr);

	/* Should only be called when module is loaded */
	return ftrace_modify_code_direct(rec->ip, old, new);
}

/*
 * The modifying_ftrace_code is used to tell the breakpoint
 * handler to call ftrace_int3_handler(). If it fails to
 * call this handler for a breakpoint added by ftrace, then
 * the kernel may crash.
 *
 * As atomic writes on x86 do not need a barrier, we do not
 * need to add smp_mb()s for this to work. It is also considered
 * that we can not read the modifying_ftrace_code before
 * executing the breakpoint. That would be quite remarkable if
 * it could do that. Here's the flow that is required:
 *
 *   CPU-0                          CPU-1
 *
 * atomic_inc(mfc);
 * write int3s
 *				<trap-int3> // implicit (r)mb
 *				if (atomic_read(mfc))
 *					call ftrace_int3_handler()
 *
 * Then when we are finished:
 *
 * atomic_dec(mfc);
 *
 * If we hit a breakpoint that was not set by ftrace, it does not
 * matter if ftrace_int3_handler() is called or not. It will
 * simply be ignored. But it is crucial that a ftrace nop/caller
 * breakpoint is handled. No other user should ever place a
 * breakpoint on an ftrace nop/caller location. It must only
 * be done by this code.
 */
atomic_t modifying_ftrace_code __read_mostly;

static int
ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
		   unsigned const char *new_code);

/*
 * Should never be called:
 *  As it is only called by __ftrace_replace_code() which is called by
 *  ftrace_replace_code() that x86 overrides, and by ftrace_update_code()
 *  which is called to turn mcount into nops or nops into function calls
 *  but not to convert a function from not using regs to one that uses
 *  regs, which ftrace_modify_call() is for.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	WARN_ON(1);
	ftrace_expected = NULL;
	return -EINVAL;
}

static unsigned long ftrace_update_func;

static int update_ftrace_func(unsigned long ip, void *new)
{
	unsigned char old[MCOUNT_INSN_SIZE];
	int ret;

	memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);

	ftrace_update_func = ip;
	/* Make sure the breakpoints see the ftrace_update_func update */
	smp_wmb();

	/* See comment above by declaration of modifying_ftrace_code */
	atomic_inc(&modifying_ftrace_code);

	ret = ftrace_modify_code(ip, old, new);

	atomic_dec(&modifying_ftrace_code);

	return ret;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	unsigned char *new;
	int ret;

	new = ftrace_call_replace(ip, (unsigned long)func);
	ret = update_ftrace_func(ip, new);

	/* Also update the regs callback function */
	if (!ret) {
		ip = (unsigned long)(&ftrace_regs_call);
		new = ftrace_call_replace(ip, (unsigned long)func);
		ret = update_ftrace_func(ip, new);
	}

	return ret;
}

static nokprobe_inline int is_ftrace_caller(unsigned long ip)
{
	if (ip == ftrace_update_func)
		return 1;

	return 0;
}

/*
 * A breakpoint was added to the code address we are about to
 * modify, and this is the handler that will just skip over it.
 * We are either changing a nop into a trace call, or a trace
 * call to a nop. While the change is taking place, we treat
 * it just like it was a nop.
 */
int ftrace_int3_handler(struct pt_regs *regs)
{
	unsigned long ip;

	if (WARN_ON_ONCE(!regs))
		return 0;

	ip = regs->ip - 1;
	if (!ftrace_location(ip) && !is_ftrace_caller(ip))
		return 0;

	regs->ip += MCOUNT_INSN_SIZE - 1;

	return 1;
}
NOKPROBE_SYMBOL(ftrace_int3_handler);

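/*
 * Sketch of what the handler above does while a site is being patched
 * (MCOUNT_INSN_SIZE is 5 bytes on x86):
 *
 *	site bytes:	cc xx xx xx xx		first byte is the int3
 *	on trap:	regs->ip == site + 1	int3 already consumed byte 0
 *	fixup:		regs->ip += 5 - 1	resume at site + 5
 *
 * The half-patched instruction is thus skipped as if it were a nop.
 */
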
static int ftrace_write(unsigned long ip, const char *val, int size)
{
	ip = text_ip_addr(ip);

	if (probe_kernel_write((void *)ip, val, size))
		return -EPERM;

	return 0;
}

static int add_break(unsigned long ip, const char *old)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];
	unsigned char brk = BREAKPOINT_INSTRUCTION;

	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	ftrace_expected = old;

	/* Make sure it is what we expect it to be */
	if (memcmp(replaced, old, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;

	return ftrace_write(ip, &brk, 1);
}

static int add_brk_on_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned const char *old;
	unsigned long ip = rec->ip;

	old = ftrace_call_replace(ip, addr);

	return add_break(rec->ip, old);
}

static int add_brk_on_nop(struct dyn_ftrace *rec)
{
	unsigned const char *old;

	old = ftrace_nop_replace();

	return add_break(rec->ip, old);
}

static int add_breakpoints(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr;
	int ret;

	ftrace_addr = ftrace_get_addr_curr(rec);

	ret = ftrace_test_record(rec, enable);

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;

	case FTRACE_UPDATE_MAKE_CALL:
		/* converting nop to call */
		return add_brk_on_nop(rec);

	case FTRACE_UPDATE_MODIFY_CALL:
	case FTRACE_UPDATE_MAKE_NOP:
		/* converting a call to a nop */
		return add_brk_on_call(rec, ftrace_addr);
	}
	return 0;
}

/*
 * On error, we need to remove breakpoints. This needs to
 * be done carefully. If the address does not currently have a
 * breakpoint, we know we are done. Otherwise, we look at the
 * remaining 4 bytes of the instruction. If it matches a nop
 * we replace the breakpoint with the nop. Otherwise we replace
 * it with the call instruction.
 */
static int remove_breakpoint(struct dyn_ftrace *rec)
{
	unsigned char ins[MCOUNT_INSN_SIZE];
	unsigned char brk = BREAKPOINT_INSTRUCTION;
	const unsigned char *nop;
	unsigned long ftrace_addr;
	unsigned long ip = rec->ip;

	/* If we fail the read, just give up */
	if (probe_kernel_read(ins, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* If this does not have a breakpoint, we are done */
	if (ins[0] != brk)
		return 0;

	nop = ftrace_nop_replace();

	/*
	 * If the last 4 bytes of the instruction do not match
	 * a nop, then we assume that this is a call to ftrace_addr.
	 */
	if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0) {
		/*
		 * For extra paranoia, we check if the breakpoint is on
		 * a call that would actually jump to the ftrace_addr.
		 * If not, don't touch the breakpoint; we would just
		 * create a disaster.
		 */
		ftrace_addr = ftrace_get_addr_new(rec);
		nop = ftrace_call_replace(ip, ftrace_addr);

		if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) == 0)
			goto update;

		/* Check both ftrace_addr and ftrace_old_addr */
		ftrace_addr = ftrace_get_addr_curr(rec);
		nop = ftrace_call_replace(ip, ftrace_addr);

		ftrace_expected = nop;

		if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0)
			return -EINVAL;
	}

 update:
	return ftrace_write(ip, nop, 1);
}

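/*
 * Summary of the recovery above, with ins[] holding the five bytes
 * read from the call site:
 *
 *	ins[0] != breakpoint		nothing to do, return 0
 *	ins[1..4] match the nop tail	rewrite byte 0 of the nop
 *	ins[1..4] match a call tail	rewrite byte 0 of that call (0xe8)
 *	anything else			leave the site alone, -EINVAL
 *
 * Only the first byte ever needs rewriting, since add_break() never
 * touched the trailing four bytes.
 */
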
static int add_update_code(unsigned long ip, unsigned const char *new)
{
	/* skip breakpoint */
	ip++;
	new++;
	return ftrace_write(ip, new, MCOUNT_INSN_SIZE - 1);
}

static int add_update_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned const char *new;

	new = ftrace_call_replace(ip, addr);
	return add_update_code(ip, new);
}

static int add_update_nop(struct dyn_ftrace *rec)
{
	unsigned long ip = rec->ip;
	unsigned const char *new;

	new = ftrace_nop_replace();
	return add_update_code(ip, new);
}

static int add_update(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr;
	int ret;

	ret = ftrace_test_record(rec, enable);

	ftrace_addr = ftrace_get_addr_new(rec);

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;

	case FTRACE_UPDATE_MODIFY_CALL:
	case FTRACE_UPDATE_MAKE_CALL:
		/* converting nop to call */
		return add_update_call(rec, ftrace_addr);

	case FTRACE_UPDATE_MAKE_NOP:
		/* converting a call to a nop */
		return add_update_nop(rec);
	}

	return 0;
}

static int finish_update_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned const char *new;

	new = ftrace_call_replace(ip, addr);

	return ftrace_write(ip, new, 1);
}

static int finish_update_nop(struct dyn_ftrace *rec)
{
	unsigned long ip = rec->ip;
	unsigned const char *new;

	new = ftrace_nop_replace();

	return ftrace_write(ip, new, 1);
}

static int finish_update(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr;
	int ret;

	ret = ftrace_update_record(rec, enable);

	ftrace_addr = ftrace_get_addr_new(rec);

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;

	case FTRACE_UPDATE_MODIFY_CALL:
	case FTRACE_UPDATE_MAKE_CALL:
		/* converting nop to call */
		return finish_update_call(rec, ftrace_addr);

	case FTRACE_UPDATE_MAKE_NOP:
		/* converting a call to a nop */
		return finish_update_nop(rec);
	}

	return 0;
}

static void do_sync_core(void *data)
{
	sync_core();
}

static void run_sync(void)
{
	int enable_irqs;

	/* No need to sync if there's only one CPU */
	if (num_online_cpus() == 1)
		return;

	enable_irqs = irqs_disabled();

	/* We may be called with interrupts disabled (on bootup). */
	if (enable_irqs)
		local_irq_enable();

	on_each_cpu(do_sync_core, NULL, 1);

	if (enable_irqs)
		local_irq_disable();
}

void ftrace_replace_code(int enable)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;
	const char *report = "adding breakpoints";
	int count = 0;
	int ret;

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		ret = add_breakpoints(rec, enable);
		if (ret)
			goto remove_breakpoints;
		count++;
	}

	run_sync();

	report = "updating code";
	count = 0;

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		ret = add_update(rec, enable);
		if (ret)
			goto remove_breakpoints;
		count++;
	}

	run_sync();

	report = "removing breakpoints";
	count = 0;

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		ret = finish_update(rec, enable);
		if (ret)
			goto remove_breakpoints;
		count++;
	}

	run_sync();

	return;

 remove_breakpoints:
	pr_warn("Failed on %s (%d):\n", report, count);
	ftrace_bug(ret, rec);
	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);
		/*
		 * Breakpoints are handled only when this function is in
		 * progress. The system could not work with them.
		 */
		if (remove_breakpoint(rec))
			BUG();
	}
	run_sync();
}

static int
ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
		   unsigned const char *new_code)
{
	int ret;

	ret = add_break(ip, old_code);
	if (ret)
		goto out;

	run_sync();

	ret = add_update_code(ip, new_code);
	if (ret)
		goto fail_update;

	run_sync();

	ret = ftrace_write(ip, new_code, 1);
	/*
	 * The breakpoint is handled only when this function is in progress.
	 * The system could not work if we could not remove it.
	 */
	BUG_ON(ret);
 out:
	run_sync();
	return ret;

 fail_update:
	/* Also here the system could not work with the breakpoint */
	if (ftrace_write(ip, old_code, 1))
		BUG();
	goto out;
}

void arch_ftrace_update_code(int command)
{
	/* See comment above by declaration of modifying_ftrace_code */
	atomic_inc(&modifying_ftrace_code);

	ftrace_modify_all_code(command);

	atomic_dec(&modifying_ftrace_code);
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}

/* Currently only x86_64 supports dynamic trampolines */
#ifdef CONFIG_X86_64

#ifdef CONFIG_MODULES
#include <linux/moduleloader.h>
/* Module allocation simplifies allocating memory for code */
static inline void *alloc_tramp(unsigned long size)
{
	return module_alloc(size);
}
static inline void tramp_free(void *tramp, int size)
{
	int npages = PAGE_ALIGN(size) >> PAGE_SHIFT;

	set_memory_nx((unsigned long)tramp, npages);
	set_memory_rw((unsigned long)tramp, npages);
	module_memfree(tramp);
}
#else
/* Trampolines can only be created if modules are supported */
static inline void *alloc_tramp(unsigned long size)
{
	return NULL;
}
static inline void tramp_free(void *tramp, int size) { }
#endif

/* Defined as markers to the end of the ftrace default trampolines */
extern void ftrace_regs_caller_end(void);
extern void ftrace_epilogue(void);
extern void ftrace_caller_op_ptr(void);
extern void ftrace_regs_caller_op_ptr(void);

/* movq function_trace_op(%rip), %rdx */
/* 0x48 0x8b 0x15 <offset-to-ftrace_trace_op (4 bytes)> */
#define OP_REF_SIZE	7

/*
 * The ftrace_ops is passed to the function callback. Since the
 * trampoline only services a single ftrace_ops, we can pass in
 * that ops directly.
 *
 * The ftrace_op_code_union is used to create a pointer to the
 * ftrace_ops that will be passed to the callback function.
 */
union ftrace_op_code_union {
	char code[OP_REF_SIZE];
	struct {
		char op[3];
		int offset;
	} __attribute__((packed));
};

#define RET_SIZE	1

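/*
 * Illustration of how the reference gets repointed: "48 8b 15 <rel32>"
 * is movq <rel32>(%rip), %rdx, with rel32 relative to the end of the
 * instruction.  create_trampoline() below recomputes rel32 so that the
 * copied instruction loads the ftrace_ops pointer stored at the end of
 * the trampoline rather than the global function_trace_op:
 *
 *	rel32 = (address of the stored ops pointer)
 *		- (address of the movq in the trampoline + OP_REF_SIZE)
 */
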
static unsigned long
create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
{
	unsigned long start_offset;
	unsigned long end_offset;
	unsigned long op_offset;
	unsigned long offset;
	unsigned long size;
	unsigned long retq;
	unsigned long *ptr;
	void *trampoline;
	void *ip;
	/* 48 8b 15 <offset> is movq <offset>(%rip), %rdx */
	unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 };
	union ftrace_op_code_union op_ptr;
	int ret;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
		start_offset = (unsigned long)ftrace_regs_caller;
		end_offset = (unsigned long)ftrace_regs_caller_end;
		op_offset = (unsigned long)ftrace_regs_caller_op_ptr;
	} else {
		start_offset = (unsigned long)ftrace_caller;
		end_offset = (unsigned long)ftrace_epilogue;
		op_offset = (unsigned long)ftrace_caller_op_ptr;
	}

	size = end_offset - start_offset;

	/*
	 * Allocate enough size to store the ftrace_caller code,
	 * the iret, as well as the address of the ftrace_ops this
	 * trampoline is used for.
	 */
	trampoline = alloc_tramp(size + RET_SIZE + sizeof(void *));
	if (!trampoline)
		return 0;

	*tramp_size = size + RET_SIZE + sizeof(void *);

	/* Copy ftrace_caller onto the trampoline memory */
	ret = probe_kernel_read(trampoline, (void *)start_offset, size);
	if (WARN_ON(ret < 0))
		goto fail;

	ip = trampoline + size;

	/* The trampoline ends with ret(q) */
	retq = (unsigned long)ftrace_stub;
	ret = probe_kernel_read(ip, (void *)retq, RET_SIZE);
	if (WARN_ON(ret < 0))
		goto fail;

	/*
	 * The address of the ftrace_ops that is used for this trampoline
	 * is stored at the end of the trampoline. This will be used to
	 * load the third parameter for the callback. Basically, that
	 * location at the end of the trampoline takes the place of
	 * the global function_trace_op variable.
	 */

	ptr = (unsigned long *)(trampoline + size + RET_SIZE);
	*ptr = (unsigned long)ops;

	op_offset -= start_offset;
	memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE);

	/* Are we pointing to the reference? */
	if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0))
		goto fail;

	/* Load the contents of ptr into the callback parameter */
	offset = (unsigned long)ptr;
	offset -= (unsigned long)trampoline + op_offset + OP_REF_SIZE;

	op_ptr.offset = offset;

	/* put in the new offset to the ftrace_ops */
	memcpy(trampoline + op_offset, &op_ptr, OP_REF_SIZE);

	/* The ALLOC_TRAMP flag lets us know we created it */
	ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;

	return (unsigned long)trampoline;
fail:
	tramp_free(trampoline, *tramp_size);
	return 0;
}

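/*
 * Resulting trampoline layout, as built above:
 *
 *	trampoline + 0 .. size - 1	copy of ftrace_(regs_)caller
 *	trampoline + size		ret(q) copied from ftrace_stub
 *	trampoline + size + RET_SIZE	the ftrace_ops pointer
 *
 * which is why *tramp_size is size + RET_SIZE + sizeof(void *).
 */
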
static unsigned long calc_trampoline_call_offset(bool save_regs)
{
	unsigned long start_offset;
	unsigned long call_offset;

	if (save_regs) {
		start_offset = (unsigned long)ftrace_regs_caller;
		call_offset = (unsigned long)ftrace_regs_call;
	} else {
		start_offset = (unsigned long)ftrace_caller;
		call_offset = (unsigned long)ftrace_call;
	}

	return call_offset - start_offset;
}

void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
{
	ftrace_func_t func;
	unsigned char *new;
	unsigned long offset;
	unsigned long ip;
	unsigned int size;
	int ret, npages;

	if (ops->trampoline) {
		/*
		 * The ftrace_ops caller may set up its own trampoline.
		 * In such a case, this code must not modify it.
		 */
		if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
			return;
		npages = PAGE_ALIGN(ops->trampoline_size) >> PAGE_SHIFT;
		set_memory_rw(ops->trampoline, npages);
	} else {
		ops->trampoline = create_trampoline(ops, &size);
		if (!ops->trampoline)
			return;
		ops->trampoline_size = size;
		npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	}

	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
	ip = ops->trampoline + offset;

	func = ftrace_ops_get_func(ops);

	/* Do a safe modify in case the trampoline is executing */
	new = ftrace_call_replace(ip, (unsigned long)func);
	ret = update_ftrace_func(ip, new);
	set_memory_ro(ops->trampoline, npages);

	/* The update should never fail */
	WARN_ON(ret);
}

/* Return the address of the function the trampoline calls */
static void *addr_from_call(void *ptr)
{
	union ftrace_code_union calc;
	int ret;

	ret = probe_kernel_read(&calc, ptr, MCOUNT_INSN_SIZE);
	if (WARN_ON_ONCE(ret < 0))
		return NULL;

	/* Make sure this is a call */
	if (WARN_ON_ONCE(calc.op != 0xe8)) {
		pr_warn("Expected e8, got %x\n", calc.op);
		return NULL;
	}

	return ptr + MCOUNT_INSN_SIZE + calc.offset;
}

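/*
 * This is the inverse of ftrace_call_replace(): for a call at ptr,
 *
 *	target = ptr + MCOUNT_INSN_SIZE + rel32
 *
 * e.g. the bytes e8 eb 00 00 00 at 0xffffffff81000010 decode to a call
 * of 0xffffffff81000010 + 5 + 0xeb = 0xffffffff81000100 (made-up
 * addresses, matching the encoding sketch earlier in this file).
 */
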
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
			   unsigned long frame_pointer);

/*
 * If the ops->trampoline was not allocated, then it probably
 * has a static trampoline func, or is the ftrace caller itself.
 */
static void *static_tramp_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	unsigned long offset;
	bool save_regs = rec->flags & FTRACE_FL_REGS_EN;
	void *ptr;

	if (ops && ops->trampoline) {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
		/*
		 * We only know about function graph tracer setting as static
		 * trampoline.
		 */
		if (ops->trampoline == FTRACE_GRAPH_ADDR)
			return (void *)prepare_ftrace_return;
#endif
		return NULL;
	}

	offset = calc_trampoline_call_offset(save_regs);

	if (save_regs)
		ptr = (void *)FTRACE_REGS_ADDR + offset;
	else
		ptr = (void *)FTRACE_ADDR + offset;

	return addr_from_call(ptr);
}

void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	unsigned long offset;

	/* If we didn't allocate this trampoline, consider it static */
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return static_tramp_func(ops, rec);

	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
	return addr_from_call((void *)ops->trampoline + offset);
}

void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return;

	tramp_free((void *)ops->trampoline, ops->trampoline_size);
	ops->trampoline = 0;
}

#endif /* CONFIG_X86_64 */
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);

static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
{
	return ftrace_text_replace(0xe9, ip, addr);
}

static int ftrace_mod_jmp(unsigned long ip, void *func)
{
	unsigned char *new;

	new = ftrace_jmp_replace(ip, (unsigned long)func);

	return update_ftrace_func(ip, new);
}

int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);

	return ftrace_mod_jmp(ip, &ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);

	return ftrace_mod_jmp(ip, &ftrace_stub);
}

#endif /* !CONFIG_DYNAMIC_FTRACE */

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)
				&return_to_handler;
	unsigned long old;
	int faulted;

	/*
	 * When resuming from suspend-to-ram, this function can be indirectly
	 * called from early CPU startup code while the CPU is in real mode,
	 * which would fail miserably.  Make sure the stack pointer is a
	 * virtual address.
	 *
	 * This check isn't as accurate as virt_addr_valid(), but it should be
	 * good enough for this purpose, and it's fast.
	 */
	if (unlikely((long)__builtin_frame_address(0) >= 0))
		return;

	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Protect against fault, even if it shouldn't
	 * happen. This tool is too intrusive to
	 * ignore such a protection.
	 */
	asm volatile(
		"1: " _ASM_MOV " (%[parent]), %[old]\n"
		"2: " _ASM_MOV " %[return_hooker], (%[parent])\n"
		"   movl $0, %[faulted]\n"
		"3:\n"

		".section .fixup, \"ax\"\n"
		"4: movl $1, %[faulted]\n"
		"   jmp 3b\n"
		".previous\n"

		_ASM_EXTABLE(1b, 4b)
		_ASM_EXTABLE(2b, 4b)

		: [old] "=&r" (old), [faulted] "=r" (faulted)
		: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
		: "memory"
	);

	if (unlikely(faulted)) {
		ftrace_graph_stop();
		WARN_ON(1);
		return;
	}

	if (function_graph_enter(old, self_addr, frame_pointer, parent))
		*parent = old;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */