// SPDX-License-Identifier: GPL-2.0
/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks go to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>

#include <trace/syscall.h>

#include <asm/set_memory.h>
#include <asm/kprobes.h>
#include <asm/sections.h>
#include <asm/ftrace.h>
#include <asm/nops.h>
#ifdef CONFIG_DYNAMIC_FTRACE

int ftrace_arch_code_modify_prepare(void)
{
	set_kernel_text_rw();
	set_all_modules_text_rw();
	return 0;
}

int ftrace_arch_code_modify_post_process(void)
{
	set_all_modules_text_ro();
	set_kernel_text_ro();
	return 0;
}
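/*
 * Editor's note: the prepare/post_process pair above temporarily lifts
 * the write protection on kernel and module text for the duration of
 * an update, and restores it to read-only afterwards.
 */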
union ftrace_code_union {
	char code[MCOUNT_INSN_SIZE];
	struct {
		unsigned char e8;
		int offset;
	} __attribute__((packed));
};
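/*
 * Editor's note: the union overlays two views of a patch site: the raw
 * MCOUNT_INSN_SIZE (5) bytes, and the decoded form of an x86 call/jmp
 * rel32 instruction: a one-byte opcode followed by a little-endian
 * signed 32-bit displacement relative to the next instruction.
 */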
static int ftrace_calc_offset(long ip, long addr)
{
	return (int)(addr - ip);
}

static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	static union ftrace_code_union calc;

	calc.e8 = 0xe8;
	calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);

	/*
	 * No locking needed, this must be called via kstop_machine
	 * which in essence is like running on a uniprocessor machine.
	 */
	return calc.code;
}
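/*
 * Worked example (hypothetical addresses): patching the site at
 * ip = 0xffffffff81000100 to call addr = 0xffffffff81000200 gives
 * offset = addr - (ip + 5) = 0xfb, so the emitted bytes are
 * e8 fb 00 00 00.
 */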
static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}
static unsigned long text_ip_addr(unsigned long ip)
{
	/*
	 * On x86_64, kernel text mappings are mapped read-only, so we use
	 * the kernel identity mapping instead of the kernel text mapping
	 * to modify the kernel text.
	 *
	 * For 32bit kernels, these mappings are the same and we can use
	 * the kernel identity mapping to modify code.
	 */
	if (within(ip, (unsigned long)_text, (unsigned long)_etext))
		ip = (unsigned long)__va(__pa_symbol(ip));

	return ip;
}
static const unsigned char *ftrace_nop_replace(void)
{
	return ideal_nops[NOP_ATOMIC5];
}
static int
ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
			  unsigned const char *new_code)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];

	ftrace_expected = old_code;

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug were to happen, it
	 * could cause us to read or write to someplace that could cause harm.
	 * Carefully read and modify the code with probe_kernel_*(), and make
	 * sure what we read is what we expected it to be before modifying it.
	 */

	/* read the text we want to modify */
	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;

	ip = text_ip_addr(ip);

	/* replace the text with the new text */
	if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
		return -EPERM;

	sync_core();

	return 0;
}
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned const char *new, *old;
	unsigned long ip = rec->ip;

	old = ftrace_call_replace(ip, addr);
	new = ftrace_nop_replace();

	/*
	 * On boot up, and when modules are loaded, the MCOUNT_ADDR
	 * is converted to a nop, and will never become MCOUNT_ADDR
	 * again. This code is either running before SMP (on boot up)
	 * or before the code will ever be executed (module load).
	 * We do not want to use the breakpoint version in this case,
	 * just modify the code directly.
	 */
	if (addr == MCOUNT_ADDR)
		return ftrace_modify_code_direct(rec->ip, old, new);

	ftrace_expected = NULL;

	/* Normal cases use add_brk_on_nop */
	WARN_ONCE(1, "invalid use of ftrace_make_nop");
	return -EINVAL;
}
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned const char *new, *old;
	unsigned long ip = rec->ip;

	old = ftrace_nop_replace();
	new = ftrace_call_replace(ip, addr);

	/* Should only be called when module is loaded */
	return ftrace_modify_code_direct(rec->ip, old, new);
}
/*
 * The modifying_ftrace_code is used to tell the breakpoint
 * handler to call ftrace_int3_handler(). If it fails to
 * call this handler for a breakpoint added by ftrace, then
 * the kernel may crash.
 *
 * As atomic writes on x86 do not need a barrier, we do not
 * need to add smp_mb()s for this to work. It is also considered
 * that we can not read the modifying_ftrace_code before
 * executing the breakpoint. That would be quite remarkable if
 * it could do that. Here's the flow that is required:
 *
 *	CPU-0				CPU-1
 *
 *	atomic_inc(mfc);
 *	write int3s
 *				<trap-int3> // implicit (r)mb
 *				if (atomic_read(mfc))
 *					call ftrace_int3_handler()
 *
 * Then when we are finished:
 *
 *	atomic_dec(mfc);
 *
 * If we hit a breakpoint that was not set by ftrace, it does not
 * matter if ftrace_int3_handler() is called or not. It will
 * simply be ignored. But it is crucial that a ftrace nop/caller
 * breakpoint is handled. No other user should ever place a
 * breakpoint on an ftrace nop/caller location. It must only
 * be done by this code.
 */
atomic_t modifying_ftrace_code __read_mostly;
static int
ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
		   unsigned const char *new_code);
/*
 * Should never be called:
 *  As it is only called by __ftrace_replace_code() which is called by
 *  ftrace_replace_code() that x86 overrides, and by ftrace_update_code()
 *  which is called to turn mcount into nops or nops into function calls
 *  but not to convert a function from not using regs to one that uses
 *  regs, which ftrace_modify_call() is for.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	WARN_ON(1);
	ftrace_expected = NULL;
	return -EINVAL;
}
static unsigned long ftrace_update_func;

static int update_ftrace_func(unsigned long ip, void *new)
{
	unsigned char old[MCOUNT_INSN_SIZE];
	int ret;

	memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);

	ftrace_update_func = ip;
	/* Make sure the breakpoints see the ftrace_update_func update */
	smp_wmb();

	/* See comment above by declaration of modifying_ftrace_code */
	atomic_inc(&modifying_ftrace_code);

	ret = ftrace_modify_code(ip, old, new);

	atomic_dec(&modifying_ftrace_code);

	return ret;
}
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	unsigned char *new;
	int ret;

	new = ftrace_call_replace(ip, (unsigned long)func);
	ret = update_ftrace_func(ip, new);

	/* Also update the regs callback function */
	if (!ret) {
		ip = (unsigned long)(&ftrace_regs_call);
		new = ftrace_call_replace(ip, (unsigned long)func);
		ret = update_ftrace_func(ip, new);
	}

	return ret;
}
static int is_ftrace_caller(unsigned long ip)
{
	if (ip == ftrace_update_func)
		return 1;

	return 0;
}
/*
 * A breakpoint was added to the code address we are about to
 * modify, and this is the handler that will just skip over it.
 * We are either changing a nop into a trace call, or a trace
 * call to a nop. While the change is taking place, we treat
 * it just like it was a nop.
 */
int ftrace_int3_handler(struct pt_regs *regs)
{
	unsigned long ip;

	if (WARN_ON_ONCE(!regs))
		return 0;

	ip = regs->ip - 1;
	if (!ftrace_location(ip) && !is_ftrace_caller(ip))
		return 0;

	regs->ip += MCOUNT_INSN_SIZE - 1;

	return 1;
}
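/*
 * Editor's note on the arithmetic above: int3 is a single byte, and on
 * the trap regs->ip points just past it, so regs->ip - 1 is the address
 * of the patched site. Advancing regs->ip by MCOUNT_INSN_SIZE - 1 lands
 * on the instruction following the 5-byte site, i.e. the site is
 * skipped as if it were a nop.
 */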
static int ftrace_write(unsigned long ip, const char *val, int size)
{
	ip = text_ip_addr(ip);

	if (probe_kernel_write((void *)ip, val, size))
		return -EPERM;

	return 0;
}
static int add_break(unsigned long ip, const char *old)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];
	unsigned char brk = BREAKPOINT_INSTRUCTION;

	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	ftrace_expected = old;

	/* Make sure it is what we expect it to be */
	if (memcmp(replaced, old, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;

	return ftrace_write(ip, &brk, 1);
}
static int add_brk_on_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned const char *old;
	unsigned long ip = rec->ip;

	old = ftrace_call_replace(ip, addr);

	return add_break(rec->ip, old);
}

static int add_brk_on_nop(struct dyn_ftrace *rec)
{
	unsigned const char *old;

	old = ftrace_nop_replace();

	return add_break(rec->ip, old);
}
static int add_breakpoints(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr;
	int ret;

	ftrace_addr = ftrace_get_addr_curr(rec);

	ret = ftrace_test_record(rec, enable);

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;

	case FTRACE_UPDATE_MAKE_CALL:
		/* converting nop to call */
		return add_brk_on_nop(rec);

	case FTRACE_UPDATE_MODIFY_CALL:
	case FTRACE_UPDATE_MAKE_NOP:
		/* converting a call to a nop */
		return add_brk_on_call(rec, ftrace_addr);
	}
	return 0;
}
/*
 * On error, we need to remove breakpoints. This needs to
 * be done carefully. If the address does not currently have a
 * breakpoint, we know we are done. Otherwise, we look at the
 * remaining 4 bytes of the instruction. If it matches a nop
 * we replace the breakpoint with the nop. Otherwise we replace
 * it with the call instruction.
 */
static int remove_breakpoint(struct dyn_ftrace *rec)
{
	unsigned char ins[MCOUNT_INSN_SIZE];
	unsigned char brk = BREAKPOINT_INSTRUCTION;
	const unsigned char *nop;
	unsigned long ftrace_addr;
	unsigned long ip = rec->ip;

	/* If we fail the read, just give up */
	if (probe_kernel_read(ins, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* If this does not have a breakpoint, we are done */
	if (ins[0] != brk)
		return 0;

	nop = ftrace_nop_replace();

	/*
	 * If the last 4 bytes of the instruction do not match
	 * a nop, then we assume that this is a call to ftrace_addr.
	 */
	if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0) {
		/*
		 * For extra paranoia, we check if the breakpoint is on
		 * a call that would actually jump to the ftrace_addr.
		 * If not, don't touch the breakpoint, as we would just
		 * create a disaster.
		 */
		ftrace_addr = ftrace_get_addr_new(rec);
		nop = ftrace_call_replace(ip, ftrace_addr);

		if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) == 0)
			goto update;

		/* Check both ftrace_addr and ftrace_old_addr */
		ftrace_addr = ftrace_get_addr_curr(rec);
		nop = ftrace_call_replace(ip, ftrace_addr);

		ftrace_expected = nop;

		if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0)
			return -EINVAL;
	}

update:
	return ftrace_write(ip, nop, 1);
}
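/*
 * Editor's note: only a single byte is written at the update label
 * above. The trailing four bytes already match the chosen replacement,
 * so overwriting the int3 with the correct first byte completes the
 * instruction.
 */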
static int add_update_code(unsigned long ip, unsigned const char *new)
{
	/* skip breakpoint */
	ip++;
	new++;
	return ftrace_write(ip, new, MCOUNT_INSN_SIZE - 1);
}

static int add_update_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned const char *new;

	new = ftrace_call_replace(ip, addr);
	return add_update_code(ip, new);
}

static int add_update_nop(struct dyn_ftrace *rec)
{
	unsigned long ip = rec->ip;
	unsigned const char *new;

	new = ftrace_nop_replace();
	return add_update_code(ip, new);
}
static int add_update(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr;
	int ret;

	ret = ftrace_test_record(rec, enable);

	ftrace_addr = ftrace_get_addr_new(rec);

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;

	case FTRACE_UPDATE_MODIFY_CALL:
	case FTRACE_UPDATE_MAKE_CALL:
		/* converting nop to call */
		return add_update_call(rec, ftrace_addr);

	case FTRACE_UPDATE_MAKE_NOP:
		/* converting a call to a nop */
		return add_update_nop(rec);
	}

	return 0;
}
static int finish_update_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned const char *new;

	new = ftrace_call_replace(ip, addr);

	return ftrace_write(ip, new, 1);
}

static int finish_update_nop(struct dyn_ftrace *rec)
{
	unsigned long ip = rec->ip;
	unsigned const char *new;

	new = ftrace_nop_replace();

	return ftrace_write(ip, new, 1);
}
static int finish_update(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr;
	int ret;

	ret = ftrace_update_record(rec, enable);

	ftrace_addr = ftrace_get_addr_new(rec);

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;

	case FTRACE_UPDATE_MODIFY_CALL:
	case FTRACE_UPDATE_MAKE_CALL:
		/* converting nop to call */
		return finish_update_call(rec, ftrace_addr);

	case FTRACE_UPDATE_MAKE_NOP:
		/* converting a call to a nop */
		return finish_update_nop(rec);
	}

	return 0;
}
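/*
 * Editor's summary of the live-patching protocol used below: each
 * 5-byte site is rewritten in three phases, with every CPU serialized
 * in between:
 *
 *  1) add_breakpoints():  write int3 over the first byte, so any CPU
 *     executing the site traps into ftrace_int3_handler() and simply
 *     skips it.
 *  2) add_update():       write the new last 4 bytes behind the int3.
 *  3) finish_update():    write the new first byte, removing the int3.
 *
 * run_sync() between the phases ensures no CPU can see a partially
 * written instruction.
 */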
static void do_sync_core(void *data)
{
	sync_core();
}

static void run_sync(void)
{
	int enable_irqs;

	/* No need to sync if there's only one CPU */
	if (num_online_cpus() == 1)
		return;

	enable_irqs = irqs_disabled();

	/* We may be called with interrupts disabled (on bootup). */
	if (enable_irqs)
		local_irq_enable();
	on_each_cpu(do_sync_core, NULL, 1);
	if (enable_irqs)
		local_irq_disable();
}
void ftrace_replace_code(int enable)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;
	const char *report = "adding breakpoints";
	int count = 0;
	int ret;

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		ret = add_breakpoints(rec, enable);
		if (ret)
			goto remove_breakpoints;
		count++;
	}

	run_sync();

	report = "updating code";
	count = 0;

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		ret = add_update(rec, enable);
		if (ret)
			goto remove_breakpoints;
		count++;
	}

	run_sync();

	report = "removing breakpoints";
	count = 0;

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		ret = finish_update(rec, enable);
		if (ret)
			goto remove_breakpoints;
		count++;
	}

	run_sync();

	return;

 remove_breakpoints:
	pr_warn("Failed on %s (%d):\n", report, count);
	ftrace_bug(ret, rec);
	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);
		/*
		 * Breakpoints are handled only when this function is in
		 * progress. The system could not work with them.
		 */
		if (remove_breakpoint(rec))
			BUG();
	}
	run_sync();
}
static int
ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
		   unsigned const char *new_code)
{
	int ret;

	ret = add_break(ip, old_code);
	if (ret)
		goto out;

	run_sync();

	ret = add_update_code(ip, new_code);
	if (ret)
		goto fail_update;

	run_sync();

	ret = ftrace_write(ip, new_code, 1);
	/*
	 * The breakpoint is handled only when this function is in progress.
	 * The system could not work if we could not remove it.
	 */
	BUG_ON(ret);
 out:
	run_sync();
	return ret;

 fail_update:
	/* Also here the system could not work with the breakpoint */
	if (ftrace_write(ip, old_code, 1))
		BUG();
	goto out;
}
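/*
 * Editor's note: ftrace_modify_code() is the single-site variant of the
 * batched three-phase update in ftrace_replace_code(): int3 in, new
 * tail bytes, new first byte, with run_sync() after each step. It is
 * used via update_ftrace_func() to repoint ftrace_call and friends.
 */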
void arch_ftrace_update_code(int command)
{
	/* See comment above by declaration of modifying_ftrace_code */
	atomic_inc(&modifying_ftrace_code);

	ftrace_modify_all_code(command);

	atomic_dec(&modifying_ftrace_code);
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
#if defined(CONFIG_X86_64) || defined(CONFIG_FUNCTION_GRAPH_TRACER)
static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
{
	static union ftrace_code_union calc;

	/* A jmp, not a call (the .e8 field holds the 0xe9 jmp opcode here) */
	calc.e8 = 0xe9;
	calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);

	/*
	 * ftrace external locks synchronize the access to the static variable.
	 */
	return calc.code;
}
#endif
/* Currently only x86_64 supports dynamic trampolines */
#ifdef CONFIG_X86_64

#ifdef CONFIG_MODULES
#include <linux/moduleloader.h>
/* Module allocation simplifies allocating memory for code */
static inline void *alloc_tramp(unsigned long size)
{
	return module_alloc(size);
}
static inline void tramp_free(void *tramp, int size)
{
	int npages = PAGE_ALIGN(size) >> PAGE_SHIFT;

	set_memory_nx((unsigned long)tramp, npages);
	set_memory_rw((unsigned long)tramp, npages);
	module_memfree(tramp);
}
#else
/* Trampolines can only be created if modules are supported */
static inline void *alloc_tramp(unsigned long size)
{
	return NULL;
}
static inline void tramp_free(void *tramp, int size) { }
#endif
/* Defined as markers to the end of the ftrace default trampolines */
extern void ftrace_regs_caller_end(void);
extern void ftrace_epilogue(void);
extern void ftrace_caller_op_ptr(void);
extern void ftrace_regs_caller_op_ptr(void);

/* movq function_trace_op(%rip), %rdx */
/* 0x48 0x8b 0x15 <offset-to-ftrace_trace_op (4 bytes)> */
#define OP_REF_SIZE	7

/*
 * The ftrace_ops is passed to the function callback. Since the
 * trampoline only services a single ftrace_ops, we can pass in
 * that ops directly.
 *
 * The ftrace_op_code_union is used to create a pointer to the
 * ftrace_ops that will be passed to the callback function.
 */
union ftrace_op_code_union {
	char code[OP_REF_SIZE];
	struct {
		char op[3];
		int offset;
	} __attribute__((packed));
};
static unsigned long
create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
{
	unsigned const char *jmp;
	unsigned long start_offset;
	unsigned long end_offset;
	unsigned long op_offset;
	unsigned long offset;
	unsigned long size;
	unsigned long ip;
	unsigned long *ptr;
	void *trampoline;
	/* 48 8b 15 <offset> is movq <offset>(%rip), %rdx */
	unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 };
	union ftrace_op_code_union op_ptr;
	int ret;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
		start_offset = (unsigned long)ftrace_regs_caller;
		end_offset = (unsigned long)ftrace_regs_caller_end;
		op_offset = (unsigned long)ftrace_regs_caller_op_ptr;
	} else {
		start_offset = (unsigned long)ftrace_caller;
		end_offset = (unsigned long)ftrace_epilogue;
		op_offset = (unsigned long)ftrace_caller_op_ptr;
	}

	size = end_offset - start_offset;

	/*
	 * Allocate enough size to store the ftrace_caller code,
	 * the jmp to ftrace_epilogue, as well as the address of
	 * the ftrace_ops this trampoline is used for.
	 */
	trampoline = alloc_tramp(size + MCOUNT_INSN_SIZE + sizeof(void *));
	if (!trampoline)
		return 0;

	*tramp_size = size + MCOUNT_INSN_SIZE + sizeof(void *);

	/* Copy ftrace_caller onto the trampoline memory */
	ret = probe_kernel_read(trampoline, (void *)start_offset, size);
	if (WARN_ON(ret < 0)) {
		tramp_free(trampoline, *tramp_size);
		return 0;
	}

	ip = (unsigned long)trampoline + size;

	/* The trampoline ends with a jmp to ftrace_epilogue */
	jmp = ftrace_jmp_replace(ip, (unsigned long)ftrace_epilogue);
	memcpy(trampoline + size, jmp, MCOUNT_INSN_SIZE);

	/*
	 * The address of the ftrace_ops that is used for this trampoline
	 * is stored at the end of the trampoline. This will be used to
	 * load the third parameter for the callback. Basically, that
	 * location at the end of the trampoline takes the place of
	 * the global function_trace_op variable.
	 */

	ptr = (unsigned long *)(trampoline + size + MCOUNT_INSN_SIZE);
	*ptr = (unsigned long)ops;

	op_offset -= start_offset;
	memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE);

	/* Are we pointing to the reference? */
	if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0)) {
		tramp_free(trampoline, *tramp_size);
		return 0;
	}

	/* Load the contents of ptr into the callback parameter */
	offset = (unsigned long)ptr;
	offset -= (unsigned long)trampoline + op_offset + OP_REF_SIZE;

	op_ptr.offset = offset;

	/* put in the new offset to the ftrace_ops */
	memcpy(trampoline + op_offset, &op_ptr, OP_REF_SIZE);

	/* The ALLOC_TRAMP flag lets us know we created it */
	ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;

	return (unsigned long)trampoline;
}
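/*
 * Editor's note on the resulting trampoline layout
 * (size = end_offset - start_offset):
 *
 *	+0		copy of ftrace_caller()/ftrace_regs_caller()
 *	+size		5-byte jmp back to ftrace_epilogue
 *	+size + 5	pointer to this trampoline's ftrace_ops
 *
 * The movq at op_offset is then re-pointed from the global
 * function_trace_op to that trailing ftrace_ops pointer.
 */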
static unsigned long calc_trampoline_call_offset(bool save_regs)
{
	unsigned long start_offset;
	unsigned long call_offset;

	if (save_regs) {
		start_offset = (unsigned long)ftrace_regs_caller;
		call_offset = (unsigned long)ftrace_regs_call;
	} else {
		start_offset = (unsigned long)ftrace_caller;
		call_offset = (unsigned long)ftrace_call;
	}

	return call_offset - start_offset;
}
void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
{
	ftrace_func_t func;
	unsigned char *new;
	unsigned long offset;
	unsigned long ip;
	unsigned int size;
	int ret, npages;

	if (ops->trampoline) {
		/*
		 * The ftrace_ops caller may set up its own trampoline.
		 * In such a case, this code must not modify it.
		 */
		if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
			return;
		npages = PAGE_ALIGN(ops->trampoline_size) >> PAGE_SHIFT;
		set_memory_rw(ops->trampoline, npages);
	} else {
		ops->trampoline = create_trampoline(ops, &size);
		if (!ops->trampoline)
			return;
		ops->trampoline_size = size;
		npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	}

	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
	ip = ops->trampoline + offset;

	func = ftrace_ops_get_func(ops);

	/* Do a safe modify in case the trampoline is executing */
	new = ftrace_call_replace(ip, (unsigned long)func);
	ret = update_ftrace_func(ip, new);
	set_memory_ro(ops->trampoline, npages);

	/* The update should never fail */
	WARN_ON(ret);
}
/* Return the address of the function the trampoline calls */
static void *addr_from_call(void *ptr)
{
	union ftrace_code_union calc;
	int ret;

	ret = probe_kernel_read(&calc, ptr, MCOUNT_INSN_SIZE);
	if (WARN_ON_ONCE(ret < 0))
		return NULL;

	/* Make sure this is a call */
	if (WARN_ON_ONCE(calc.e8 != 0xe8)) {
		pr_warn("Expected e8, got %x\n", calc.e8);
		return NULL;
	}

	return ptr + MCOUNT_INSN_SIZE + calc.offset;
}
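/*
 * Editor's note: this is the inverse of ftrace_call_replace(). The call
 * target is the address of the next instruction (ptr + MCOUNT_INSN_SIZE)
 * plus the signed 32-bit displacement decoded from the instruction.
 */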
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
			   unsigned long frame_pointer);
/*
 * If the ops->trampoline was not allocated, then it probably
 * has a static trampoline func, or is the ftrace caller itself.
 */
static void *static_tramp_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	unsigned long offset;
	bool save_regs = rec->flags & FTRACE_FL_REGS_EN;
	void *ptr;

	if (ops && ops->trampoline) {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
		/*
		 * We only know about function graph tracer setting as static
		 * trampoline.
		 */
		if (ops->trampoline == FTRACE_GRAPH_ADDR)
			return (void *)prepare_ftrace_return;
#endif
		return NULL;
	}

	offset = calc_trampoline_call_offset(save_regs);

	if (save_regs)
		ptr = (void *)FTRACE_REGS_ADDR + offset;
	else
		ptr = (void *)FTRACE_ADDR + offset;

	return addr_from_call(ptr);
}
void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	unsigned long offset;

	/* If we didn't allocate this trampoline, consider it static */
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return static_tramp_func(ops, rec);

	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
	return addr_from_call((void *)ops->trampoline + offset);
}

void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return;

	tramp_free((void *)ops->trampoline, ops->trampoline_size);
	ops->trampoline = 0;
}
#endif /* CONFIG_X86_64 */
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);

static int ftrace_mod_jmp(unsigned long ip, void *func)
{
	unsigned char *new;

	new = ftrace_jmp_replace(ip, (unsigned long)func);

	return update_ftrace_func(ip, new);
}

int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);

	return ftrace_mod_jmp(ip, &ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);

	return ftrace_mod_jmp(ip, &ftrace_stub);
}

#endif /* CONFIG_DYNAMIC_FTRACE */
/*
 * Hook the return address and push it in the stack of return addrs
 * in the current thread info.
 */
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
			   unsigned long frame_pointer)
{
	unsigned long old;
	int faulted;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)&return_to_handler;

	/*
	 * When resuming from suspend-to-ram, this function can be indirectly
	 * called from early CPU startup code while the CPU is in real mode,
	 * which would fail miserably. Make sure the stack pointer is a
	 * virtual address.
	 *
	 * This check isn't as accurate as virt_addr_valid(), but it should be
	 * good enough for this purpose, and it's fast.
	 */
	if (unlikely((long)__builtin_frame_address(0) >= 0))
		return;

	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Protect against faults, even though they shouldn't
	 * happen. This tool is too intrusive to
	 * forgo such a protection.
	 */
	asm volatile(
		"1: " _ASM_MOV " (%[parent]), %[old]\n"
		"2: " _ASM_MOV " %[return_hooker], (%[parent])\n"
		"   movl $0, %[faulted]\n"
		"3:\n"

		".section .fixup, \"ax\"\n"
		"4: movl $1, %[faulted]\n"
		"   jmp 3b\n"
		".previous\n"

		_ASM_EXTABLE(1b, 4b)
		_ASM_EXTABLE(2b, 4b)

		: [old] "=&r" (old), [faulted] "=r" (faulted)
		: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
		: "memory"
	);
	if (unlikely(faulted)) {
		ftrace_graph_stop();
		WARN_ON(1);
		return;
	}

	trace.func = self_addr;
	trace.depth = current->curr_ret_stack + 1;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		*parent = old;
		return;
	}

	if (ftrace_push_return_trace(old, self_addr, &trace.depth,
				     frame_pointer, parent) == -EBUSY) {
		*parent = old;
		return;
	}
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */