/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks go to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>

#include <trace/syscall.h>

#include <asm/cacheflush.h>
#include <asm/kprobes.h>
#include <asm/ftrace.h>
#include <asm/nops.h>

#ifdef CONFIG_DYNAMIC_FTRACE

int ftrace_arch_code_modify_prepare(void)
{
	set_kernel_text_rw();
	set_all_modules_text_rw();
	return 0;
}

int ftrace_arch_code_modify_post_process(void)
{
	set_all_modules_text_ro();
	set_kernel_text_ro();
	return 0;
}

union ftrace_code_union {
	char code[MCOUNT_INSN_SIZE];
	struct {
		unsigned char e8;
		int offset;
	} __attribute__((packed));
};

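/*
 * This union overlays the 5-byte x86 call/jmp encoding: one opcode byte
 * (0xe8 for call, 0xe9 for jmp) followed by a 32-bit displacement
 * relative to the next instruction. MCOUNT_INSN_SIZE is 5 on x86.
 */
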
static int ftrace_calc_offset(long ip, long addr)
{
	return (int)(addr - ip);
}

static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	static union ftrace_code_union calc;

	calc.e8		= 0xe8;
	calc.offset	= ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);

	/*
	 * No locking needed, this must be called via kstop_machine
	 * which in essence is like running on a uniprocessor machine.
	 */
	return calc.code;
}

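/*
 * Worked example with illustrative addresses: a call patched in at
 * ip = 0xffffffff81000000 that must reach addr = 0xffffffff81000030
 * gets offset = addr - (ip + MCOUNT_INSN_SIZE) = 0x2b, so the bytes
 * written are: e8 2b 00 00 00.
 */
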
static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}

static unsigned long text_ip_addr(unsigned long ip)
{
	/*
	 * On x86_64, kernel text mappings are mapped read-only with
	 * CONFIG_DEBUG_RODATA. So we use the kernel identity mapping instead
	 * of the kernel text mapping to modify the kernel text.
	 *
	 * For 32bit kernels, these mappings are the same and we can use
	 * the kernel identity mapping to modify code.
	 */
	if (within(ip, (unsigned long)_text, (unsigned long)_etext))
		ip = (unsigned long)__va(__pa_symbol(ip));

	return ip;
}

static const unsigned char *ftrace_nop_replace(void)
{
	return ideal_nops[NOP_ATOMIC5];
}

static int
ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
			  unsigned const char *new_code)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];

	/*
	 * Note: Due to modules and __init, code can
	 * disappear and change, we need to protect against faulting
	 * as well as code changing. We do this by using the
	 * probe_kernel_* functions.
	 *
	 * No real locking needed, this code is run through
	 * kstop_machine, or before SMP starts.
	 */

	/* read the text we want to modify */
	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;

	ip = text_ip_addr(ip);

	/* replace the text with the new text */
	if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
		return -EPERM;

	sync_core();

	return 0;
}

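/*
 * Note: ftrace_modify_code_direct() is only safe while nothing else can
 * execute the target text: on boot before SMP is up, or in a module that
 * has not started running yet. Once the code may execute concurrently,
 * the int3 breakpoint protocol implemented further down must be used
 * instead.
 */
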
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned const char *new, *old;
	unsigned long ip = rec->ip;

	old = ftrace_call_replace(ip, addr);
	new = ftrace_nop_replace();

	/*
	 * On boot up, and when modules are loaded, the MCOUNT_ADDR
	 * is converted to a nop, and will never become MCOUNT_ADDR
	 * again. This code is either running before SMP (on boot up)
	 * or before the code will ever be executed (module load).
	 * We do not want to use the breakpoint version in this case,
	 * just modify the code directly.
	 */
	if (addr == MCOUNT_ADDR)
		return ftrace_modify_code_direct(rec->ip, old, new);

	/* Normal cases use add_brk_on_nop */
	WARN_ONCE(1, "invalid use of ftrace_make_nop");
	return -EINVAL;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned const char *new, *old;
	unsigned long ip = rec->ip;

	old = ftrace_nop_replace();
	new = ftrace_call_replace(ip, addr);

	/* Should only be called when module is loaded */
	return ftrace_modify_code_direct(rec->ip, old, new);
}

/*
 * The modifying_ftrace_code is used to tell the breakpoint
 * handler to call ftrace_int3_handler(). If it fails to
 * call this handler for a breakpoint added by ftrace, then
 * the kernel may crash.
 *
 * As atomic writes on x86 do not need a barrier, we do not
 * need to add smp_mb()s for this to work. It is also considered
 * that we can not read the modifying_ftrace_code before
 * executing the breakpoint. That would be quite remarkable if
 * it could do that. Here's the flow that is required:
 *
 *   CPU-0                          CPU-1
 *
 * atomic_inc(mfc);
 * write int3s
 *				<trap-int3> // implicit (r)mb
 *				if (atomic_read(mfc))
 *					call ftrace_int3_handler()
 *
 * Then when we are finished:
 *
 * atomic_dec(mfc);
 *
 * If we hit a breakpoint that was not set by ftrace, it does not
 * matter if ftrace_int3_handler() is called or not. It will
 * simply be ignored. But it is crucial that an ftrace nop/caller
 * breakpoint is handled. No other user should ever place a
 * breakpoint on an ftrace nop/caller location. It must only
 * be done by this code.
 */
atomic_t modifying_ftrace_code __read_mostly;

static int
ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
		   unsigned const char *new_code);

/*
 * Should never be called:
 *  As it is only called by __ftrace_replace_code() which is called by
 *  ftrace_replace_code() that x86 overrides, and by ftrace_update_code()
 *  which is called to turn mcount into nops or nops into function calls
 *  but not to convert a function from not using regs to one that uses
 *  regs, which ftrace_modify_call() is for.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	WARN_ON(1);
	return -EINVAL;
}

static unsigned long ftrace_update_func;

static int update_ftrace_func(unsigned long ip, void *new)
{
	unsigned char old[MCOUNT_INSN_SIZE];
	int ret;

	memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);

	ftrace_update_func = ip;
	/* Make sure the breakpoints see the ftrace_update_func update */
	smp_wmb();

	/* See comment above by declaration of modifying_ftrace_code */
	atomic_inc(&modifying_ftrace_code);

	ret = ftrace_modify_code(ip, old, new);

	atomic_dec(&modifying_ftrace_code);

	return ret;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	unsigned char *new;
	int ret;

	new = ftrace_call_replace(ip, (unsigned long)func);
	ret = update_ftrace_func(ip, new);

	/* Also update the regs callback function */
	if (!ret) {
		ip = (unsigned long)(&ftrace_regs_call);
		new = ftrace_call_replace(ip, (unsigned long)func);
		ret = update_ftrace_func(ip, new);
	}

	return ret;
}

static int is_ftrace_caller(unsigned long ip)
{
	if (ip == ftrace_update_func)
		return 1;

	return 0;
}

/*
 * A breakpoint was added to the code address we are about to
 * modify, and this is the handler that will just skip over it.
 * We are either changing a nop into a trace call, or a trace
 * call to a nop. While the change is taking place, we treat
 * it just like it was a nop.
 */
int ftrace_int3_handler(struct pt_regs *regs)
{
	unsigned long ip;

	if (WARN_ON_ONCE(!regs))
		return 0;

	ip = regs->ip - 1;
	if (!ftrace_location(ip) && !is_ftrace_caller(ip))
		return 0;

	regs->ip += MCOUNT_INSN_SIZE - 1;

	return 1;
}

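/*
 * Note the arithmetic above: the int3 trap reports regs->ip as the byte
 * just past the one-byte breakpoint, so adding MCOUNT_INSN_SIZE - 1
 * moves the saved ip over the remaining four bytes of the patched slot.
 * The interrupted CPU resumes after the instruction as if it had
 * executed a nop.
 */
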
static int ftrace_write(unsigned long ip, const char *val, int size)
{
	ip = text_ip_addr(ip);

	if (probe_kernel_write((void *)ip, val, size))
		return -EPERM;

	return 0;
}

static int add_break(unsigned long ip, const char *old)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];
	unsigned char brk = BREAKPOINT_INSTRUCTION;

	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (memcmp(replaced, old, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;

	return ftrace_write(ip, &brk, 1);
}

static int add_brk_on_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned const char *old;
	unsigned long ip = rec->ip;

	old = ftrace_call_replace(ip, addr);

	return add_break(rec->ip, old);
}

static int add_brk_on_nop(struct dyn_ftrace *rec)
{
	unsigned const char *old;

	old = ftrace_nop_replace();

	return add_break(rec->ip, old);
}

static int add_breakpoints(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr;
	int ret;

	ftrace_addr = ftrace_get_addr_curr(rec);

	ret = ftrace_test_record(rec, enable);

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;

	case FTRACE_UPDATE_MAKE_CALL:
		/* converting nop to call */
		return add_brk_on_nop(rec);

	case FTRACE_UPDATE_MODIFY_CALL:
	case FTRACE_UPDATE_MAKE_NOP:
		/* converting a call to a nop */
		return add_brk_on_call(rec, ftrace_addr);
	}
	return 0;
}

/*
 * On error, we need to remove breakpoints. This needs to
 * be done carefully. If the address does not currently have a
 * breakpoint, we know we are done. Otherwise, we look at the
 * remaining 4 bytes of the instruction. If it matches a nop
 * we replace the breakpoint with the nop. Otherwise we replace
 * it with the call instruction.
 */
static int remove_breakpoint(struct dyn_ftrace *rec)
{
	unsigned char ins[MCOUNT_INSN_SIZE];
	unsigned char brk = BREAKPOINT_INSTRUCTION;
	const unsigned char *nop;
	unsigned long ftrace_addr;
	unsigned long ip = rec->ip;

	/* If we fail the read, just give up */
	if (probe_kernel_read(ins, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* If this does not have a breakpoint, we are done */
	if (ins[0] != brk)
		return 0;

	nop = ftrace_nop_replace();

	/*
	 * If the last 4 bytes of the instruction do not match
	 * a nop, then we assume that this is a call to ftrace_addr.
	 */
	if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0) {
		/*
		 * For extra paranoia, we check if the breakpoint is on
		 * a call that would actually jump to the ftrace_addr.
		 * If not, don't touch the breakpoint, as we would just
		 * create a disaster.
		 */
		ftrace_addr = ftrace_get_addr_new(rec);
		nop = ftrace_call_replace(ip, ftrace_addr);

		if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) == 0)
			goto update;

		/* Check both ftrace_addr and ftrace_old_addr */
		ftrace_addr = ftrace_get_addr_curr(rec);
		nop = ftrace_call_replace(ip, ftrace_addr);

		if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0)
			return -EINVAL;
	}

update:
	return ftrace_write(ip, nop, 1);
}

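/*
 * Note that only the first byte is rewritten above: the remaining four
 * bytes already hold the matching nop or call tail, so atomically
 * replacing the int3 opcode with the intended first byte is enough to
 * restore a consistent instruction.
 */
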
static int add_update_code(unsigned long ip, unsigned const char *new)
{
	/* skip breakpoint */
	ip++;
	new++;
	return ftrace_write(ip, new, MCOUNT_INSN_SIZE - 1);
}

static int add_update_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned const char *new;

	new = ftrace_call_replace(ip, addr);
	return add_update_code(ip, new);
}

static int add_update_nop(struct dyn_ftrace *rec)
{
	unsigned long ip = rec->ip;
	unsigned const char *new;

	new = ftrace_nop_replace();
	return add_update_code(ip, new);
}

static int add_update(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr;
	int ret;

	ret = ftrace_test_record(rec, enable);

	ftrace_addr = ftrace_get_addr_new(rec);

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;

	case FTRACE_UPDATE_MODIFY_CALL:
	case FTRACE_UPDATE_MAKE_CALL:
		/* converting nop to call */
		return add_update_call(rec, ftrace_addr);

	case FTRACE_UPDATE_MAKE_NOP:
		/* converting a call to a nop */
		return add_update_nop(rec);
	}

	return 0;
}

static int finish_update_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned const char *new;

	new = ftrace_call_replace(ip, addr);

	return ftrace_write(ip, new, 1);
}

static int finish_update_nop(struct dyn_ftrace *rec)
{
	unsigned long ip = rec->ip;
	unsigned const char *new;

	new = ftrace_nop_replace();

	return ftrace_write(ip, new, 1);
}

static int finish_update(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr;
	int ret;

	ret = ftrace_update_record(rec, enable);

	ftrace_addr = ftrace_get_addr_new(rec);

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;

	case FTRACE_UPDATE_MODIFY_CALL:
	case FTRACE_UPDATE_MAKE_CALL:
		/* converting nop to call */
		return finish_update_call(rec, ftrace_addr);

	case FTRACE_UPDATE_MAKE_NOP:
		/* converting a call to a nop */
		return finish_update_nop(rec);
	}

	return 0;
}

static void do_sync_core(void *data)
{
	sync_core();
}

static void run_sync(void)
{
	int enable_irqs = irqs_disabled();

	/* We may be called with interrupts disabled (on bootup). */
	if (enable_irqs)
		local_irq_enable();
	on_each_cpu(do_sync_core, NULL, 1);
	if (enable_irqs)
		local_irq_disable();
}

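/*
 * ftrace_replace_code() converts every record in three passes, with a
 * run_sync() (serializing all CPUs) between them: first each site's
 * first byte becomes an int3 breakpoint, then the four tail bytes are
 * updated behind it, and finally the first byte is replaced with the
 * new opcode. Any CPU that hits a site mid-update traps into
 * ftrace_int3_handler() and simply skips the instruction.
 */
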
void ftrace_replace_code(int enable)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;
	const char *report = "adding breakpoints";
	int count = 0;
	int ret;

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		ret = add_breakpoints(rec, enable);
		if (ret)
			goto remove_breakpoints;
		count++;
	}

	run_sync();

	report = "updating code";
	count = 0;

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		ret = add_update(rec, enable);
		if (ret)
			goto remove_breakpoints;
		count++;
	}

	run_sync();

	report = "removing breakpoints";
	count = 0;

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		ret = finish_update(rec, enable);
		if (ret)
			goto remove_breakpoints;
		count++;
	}

	run_sync();

	return;

remove_breakpoints:
	pr_warn("Failed on %s (%d):\n", report, count);
	ftrace_bug(ret, rec);
	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);
		/*
		 * Breakpoints are handled only when this function is in
		 * progress. The system could not work with them.
		 */
		if (remove_breakpoint(rec))
			BUG();
	}
	run_sync();
}

static int
ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
		   unsigned const char *new_code)
{
	int ret;

	ret = add_break(ip, old_code);
	if (ret)
		goto out;

	run_sync();

	ret = add_update_code(ip, new_code);
	if (ret)
		goto fail_update;

	run_sync();

	ret = ftrace_write(ip, new_code, 1);
	/*
	 * The breakpoint is handled only when this function is in progress.
	 * The system could not work if we could not remove it.
	 */
	BUG_ON(ret);
out:
	run_sync();
	return ret;

fail_update:
	/* Also here the system could not work with the breakpoint */
	if (ftrace_write(ip, old_code, 1))
		BUG();
	goto out;
}

void arch_ftrace_update_code(int command)
{
	/* See comment above by declaration of modifying_ftrace_code */
	atomic_inc(&modifying_ftrace_code);

	ftrace_modify_all_code(command);

	atomic_dec(&modifying_ftrace_code);
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}

#if defined(CONFIG_X86_64) || defined(CONFIG_FUNCTION_GRAPH_TRACER)
static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
{
	static union ftrace_code_union calc;

	/* Jmp, not a call (the .e8 field actually holds 0xe9 here) */
	calc.e8		= 0xe9;
	calc.offset	= ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);

	/*
	 * ftrace external locks synchronize the access to the static variable.
	 */
	return calc.code;
}
#endif

/* Currently only x86_64 supports dynamic trampolines */
#ifdef CONFIG_X86_64

#ifdef CONFIG_MODULES
#include <linux/moduleloader.h>
/* Module allocation simplifies allocating memory for code */
static inline void *alloc_tramp(unsigned long size)
{
	return module_alloc(size);
}
static inline void tramp_free(void *tramp)
{
	module_memfree(tramp);
}
#else
/* Trampolines can only be created if modules are supported */
static inline void *alloc_tramp(unsigned long size)
{
	return NULL;
}
static inline void tramp_free(void *tramp) { }
#endif

/* Defined as markers to the end of the ftrace default trampolines */
extern void ftrace_caller_end(void);
extern void ftrace_regs_caller_end(void);
extern void ftrace_return(void);
extern void ftrace_caller_op_ptr(void);
extern void ftrace_regs_caller_op_ptr(void);

/* movq function_trace_op(%rip), %rdx */
/* 0x48 0x8b 0x15 <offset-to-ftrace_trace_op (4 bytes)> */
#define OP_REF_SIZE	7

/*
 * The ftrace_ops is passed to the function callback. Since the
 * trampoline only services a single ftrace_ops, we can pass in
 * that ops directly.
 *
 * The ftrace_op_code_union is used to create a pointer to the
 * ftrace_ops that will be passed to the callback function.
 */
union ftrace_op_code_union {
	char code[OP_REF_SIZE];
	struct {
		char op[3];
		int offset;
	} __attribute__((packed));
};

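/*
 * create_trampoline() below patches the 32-bit displacement of this
 * movq so that, inside the trampoline copy, it resolves to the
 * ftrace_ops pointer stored at the trampoline's end rather than to the
 * global function_trace_op.
 */
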
static unsigned long
create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
{
	unsigned const char *jmp;
	unsigned long start_offset;
	unsigned long end_offset;
	unsigned long op_offset;
	unsigned long offset;
	unsigned long size;
	unsigned long ip;
	unsigned long *ptr;
	void *trampoline;
	/* 48 8b 15 <offset> is movq <offset>(%rip), %rdx */
	unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 };
	union ftrace_op_code_union op_ptr;
	int ret;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
		start_offset = (unsigned long)ftrace_regs_caller;
		end_offset = (unsigned long)ftrace_regs_caller_end;
		op_offset = (unsigned long)ftrace_regs_caller_op_ptr;
	} else {
		start_offset = (unsigned long)ftrace_caller;
		end_offset = (unsigned long)ftrace_caller_end;
		op_offset = (unsigned long)ftrace_caller_op_ptr;
	}

	size = end_offset - start_offset;

	/*
	 * Allocate enough size to store the ftrace_caller code,
	 * the jmp to ftrace_return, as well as the address of
	 * the ftrace_ops this trampoline is used for.
	 */
	trampoline = alloc_tramp(size + MCOUNT_INSN_SIZE + sizeof(void *));
	if (!trampoline)
		return 0;

	*tramp_size = size + MCOUNT_INSN_SIZE + sizeof(void *);

	/* Copy ftrace_caller onto the trampoline memory */
	ret = probe_kernel_read(trampoline, (void *)start_offset, size);
	if (WARN_ON(ret < 0)) {
		tramp_free(trampoline);
		return 0;
	}

	ip = (unsigned long)trampoline + size;

	/* The trampoline ends with a jmp to ftrace_return */
	jmp = ftrace_jmp_replace(ip, (unsigned long)ftrace_return);
	memcpy(trampoline + size, jmp, MCOUNT_INSN_SIZE);

	/*
	 * The address of the ftrace_ops that is used for this trampoline
	 * is stored at the end of the trampoline. This will be used to
	 * load the third parameter for the callback. Basically, that
	 * location at the end of the trampoline takes the place of
	 * the global function_trace_op variable.
	 */
	ptr = (unsigned long *)(trampoline + size + MCOUNT_INSN_SIZE);
	*ptr = (unsigned long)ops;

	op_offset -= start_offset;
	memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE);

	/* Are we pointing to the reference? */
	if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0)) {
		tramp_free(trampoline);
		return 0;
	}

	/* Load the contents of ptr into the callback parameter */
	offset = (unsigned long)ptr;
	offset -= (unsigned long)trampoline + op_offset + OP_REF_SIZE;

	op_ptr.offset = offset;

	/* put in the new offset to the ftrace_ops */
	memcpy(trampoline + op_offset, &op_ptr, OP_REF_SIZE);

	/* The ALLOC_TRAMP flag lets us know we created it */
	ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;

	return (unsigned long)trampoline;
}

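/*
 * Resulting trampoline layout (sketch):
 *
 *   trampoline + 0          copy of ftrace_caller (or ftrace_regs_caller)
 *   trampoline + size       5-byte jmp back to ftrace_return
 *   trampoline + size + 5   the ftrace_ops pointer, read by the patched
 *                           %rip-relative movq instead of function_trace_op
 */
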
static unsigned long calc_trampoline_call_offset(bool save_regs)
{
	unsigned long start_offset;
	unsigned long call_offset;

	if (save_regs) {
		start_offset = (unsigned long)ftrace_regs_caller;
		call_offset = (unsigned long)ftrace_regs_call;
	} else {
		start_offset = (unsigned long)ftrace_caller;
		call_offset = (unsigned long)ftrace_call;
	}

	return call_offset - start_offset;
}

void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
{
	ftrace_func_t func;
	unsigned char *new;
	unsigned long offset;
	unsigned long ip;
	unsigned int size;
	int ret;

	if (ops->trampoline) {
		/*
		 * The ftrace_ops caller may set up its own trampoline.
		 * In such a case, this code must not modify it.
		 */
		if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
			return;
	} else {
		ops->trampoline = create_trampoline(ops, &size);
		if (!ops->trampoline)
			return;
		ops->trampoline_size = size;
	}

	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
	ip = ops->trampoline + offset;

	func = ftrace_ops_get_func(ops);

	/* Do a safe modify in case the trampoline is executing */
	new = ftrace_call_replace(ip, (unsigned long)func);
	ret = update_ftrace_func(ip, new);

	/* The update should never fail */
	WARN_ON(ret);
}

/* Return the address of the function the trampoline calls */
static void *addr_from_call(void *ptr)
{
	union ftrace_code_union calc;
	int ret;

	ret = probe_kernel_read(&calc, ptr, MCOUNT_INSN_SIZE);
	if (WARN_ON_ONCE(ret < 0))
		return NULL;

	/* Make sure this is a call */
	if (WARN_ON_ONCE(calc.e8 != 0xe8)) {
		pr_warn("Expected e8, got %x\n", calc.e8);
		return NULL;
	}

	return ptr + MCOUNT_INSN_SIZE + calc.offset;
}

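/*
 * This is the inverse of ftrace_call_replace(): for a call instruction
 * at ptr, the target is ptr + MCOUNT_INSN_SIZE + rel32, where rel32 is
 * the 32-bit displacement that follows the e8 opcode.
 */
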
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
			   unsigned long frame_pointer);

/*
 * If the ops->trampoline was not allocated, then it probably
 * has a static trampoline func, or is the ftrace caller itself.
 */
static void *static_tramp_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	unsigned long offset;
	bool save_regs = rec->flags & FTRACE_FL_REGS_EN;
	void *ptr;

	if (ops && ops->trampoline) {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
		/*
		 * We only know about function graph tracer setting as static
		 * trampoline.
		 */
		if (ops->trampoline == FTRACE_GRAPH_ADDR)
			return (void *)prepare_ftrace_return;
#endif
		return NULL;
	}

	offset = calc_trampoline_call_offset(save_regs);

	if (save_regs)
		ptr = (void *)FTRACE_REGS_ADDR + offset;
	else
		ptr = (void *)FTRACE_ADDR + offset;

	return addr_from_call(ptr);
}

void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	unsigned long offset;

	/* If we didn't allocate this trampoline, consider it static */
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return static_tramp_func(ops, rec);

	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
	return addr_from_call((void *)ops->trampoline + offset);
}

void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return;

	tramp_free((void *)ops->trampoline);
	ops->trampoline = 0;
}

#endif /* CONFIG_X86_64 */
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);

static int ftrace_mod_jmp(unsigned long ip, void *func)
{
	unsigned char *new;

	new = ftrace_jmp_replace(ip, (unsigned long)func);

	return update_ftrace_func(ip, new);
}

int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);

	return ftrace_mod_jmp(ip, &ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);

	return ftrace_mod_jmp(ip, &ftrace_stub);
}

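/*
 * Enabling and disabling the graph caller simply flips the 5-byte jmp
 * at ftrace_graph_call between ftrace_graph_caller and ftrace_stub,
 * going through the same breakpoint-protected update_ftrace_func()
 * path as the call-site patching above.
 */
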
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Hook the return address and push it in the stack of return addrs
 * in the current thread info.
 */
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
			   unsigned long frame_pointer)
{
	unsigned long old;
	int faulted;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)&return_to_handler;

	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Protect against a fault, even if it shouldn't
	 * happen. This tool is too intrusive to
	 * ignore such a protection.
	 */
	asm volatile(
		"1: " _ASM_MOV " (%[parent]), %[old]\n"
		"2: " _ASM_MOV " %[return_hooker], (%[parent])\n"
		"   movl $0, %[faulted]\n"
		"3:\n"

		".section .fixup, \"ax\"\n"
		"4: movl $1, %[faulted]\n"
		"   jmp 3b\n"
		".previous\n"

		_ASM_EXTABLE(1b, 4b)
		_ASM_EXTABLE(2b, 4b)

		: [old] "=&r" (old), [faulted] "=r" (faulted)
		: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
		: "memory"
	);

	if (unlikely(faulted)) {
		ftrace_graph_stop();
		WARN_ON(1);
		return;
	}

	trace.func = self_addr;
	trace.depth = current->curr_ret_stack + 1;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		*parent = old;
		return;
	}

	if (ftrace_push_return_trace(old, self_addr, &trace.depth,
				     frame_pointer) == -EBUSY) {
		*parent = old;
		return;
	}
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */