/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks go to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>

#include <trace/syscall.h>

#include <asm/cacheflush.h>
#include <asm/kprobes.h>
#include <asm/ftrace.h>
#include <asm/nops.h>

#ifdef CONFIG_DYNAMIC_FTRACE

int ftrace_arch_code_modify_prepare(void)
{
        set_kernel_text_rw();
        set_all_modules_text_rw();
        return 0;
}

int ftrace_arch_code_modify_post_process(void)
{
        set_all_modules_text_ro();
        set_kernel_text_ro();
        return 0;
}

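/*
 * A 5-byte mcount call site is built through this union: one opcode
 * byte (0xe8, call rel32) followed by a 32-bit displacement measured
 * from the address of the *next* instruction. Filling in the struct
 * fields and reading back .code yields the raw instruction bytes.
 */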
union ftrace_code_union {
        char code[MCOUNT_INSN_SIZE];
        struct {
                char e8;
                int offset;
        } __attribute__((packed));
};

static int ftrace_calc_offset(long ip, long addr)
{
        return (int)(addr - ip);
}

static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
        static union ftrace_code_union calc;

        calc.e8 = 0xe8;
        calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);

        /*
         * No locking needed, this must be called via kstop_machine
         * which in essence is like running on a uniprocessor machine.
         */
        return calc.code;
}

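/*
 * Worked example (addresses illustrative only): for a call site at
 * ip = 0xffffffff81000000 targeting addr = 0xffffffff81000100, the
 * displacement is addr - (ip + MCOUNT_INSN_SIZE) = 0xfb, so the bytes
 * produced are: e8 fb 00 00 00.
 */
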
static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
        return addr >= start && addr < end;
}

static unsigned long text_ip_addr(unsigned long ip)
{
        /*
         * On x86_64, kernel text mappings are mapped read-only with
         * CONFIG_DEBUG_RODATA. So we use the kernel identity mapping
         * instead of the kernel text mapping to modify the kernel text.
         *
         * For 32bit kernels, these mappings are the same and we can use
         * the kernel identity mapping to modify code.
         */
        if (within(ip, (unsigned long)_text, (unsigned long)_etext))
                ip = (unsigned long)__va(__pa_symbol(ip));

        return ip;
}

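/*
 * ideal_nops[NOP_ATOMIC5] is the single 5-byte nop that boot code
 * (arch_init_ideal_nops() in alternative.c) selected for this CPU;
 * it exactly covers a patched-out mcount call site.
 */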
static const unsigned char *ftrace_nop_replace(void)
{
        return ideal_nops[NOP_ATOMIC5];
}

static int
ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
                          unsigned const char *new_code)
{
        unsigned char replaced[MCOUNT_INSN_SIZE];

        /*
         * Note: Due to modules and __init, code can disappear and
         * change; we need to protect against faulting as well as code
         * changing. We do this by using the probe_kernel_* functions.
         *
         * No real locking needed, this code is run through
         * kstop_machine, or before SMP starts.
         */

        /* read the text we want to modify */
        if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
                return -EFAULT;

        /* Make sure it is what we expect it to be */
        if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
                return -EINVAL;

        ip = text_ip_addr(ip);

        /* replace the text with the new text */
        if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
                return -EPERM;

        sync_core();

        return 0;
}

int ftrace_make_nop(struct module *mod,
                    struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned const char *new, *old;
        unsigned long ip = rec->ip;

        old = ftrace_call_replace(ip, addr);
        new = ftrace_nop_replace();

        /*
         * On boot up, and when modules are loaded, the MCOUNT_ADDR
         * is converted to a nop, and will never become MCOUNT_ADDR
         * again. This code is either running before SMP (on boot up)
         * or before the code will ever be executed (module load).
         * We do not want to use the breakpoint version in this case,
         * just modify the code directly.
         */
        if (addr == MCOUNT_ADDR)
                return ftrace_modify_code_direct(rec->ip, old, new);

        /* Normal cases use add_brk_on_nop */
        WARN_ONCE(1, "invalid use of ftrace_make_nop");
        return -EINVAL;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned const char *new, *old;
        unsigned long ip = rec->ip;

        old = ftrace_nop_replace();
        new = ftrace_call_replace(ip, addr);

        /* Should only be called when module is loaded */
        return ftrace_modify_code_direct(rec->ip, old, new);
}

/*
 * The modifying_ftrace_code is used to tell the breakpoint
 * handler to call ftrace_int3_handler(). If it fails to
 * call this handler for a breakpoint added by ftrace, then
 * the kernel may crash.
 *
 * As atomic writes on x86 do not need a barrier, we do not
 * need to add smp_mb()s for this to work. We also assume that a
 * CPU can not read modifying_ftrace_code before executing the
 * breakpoint; it would be quite remarkable if it could.
 * Here's the flow that is required:
 *
 *   CPU-0                          CPU-1
 *
 *  atomic_inc(mfc);
 *  write int3s
 *                              <trap-int3> // implicit (r)mb
 *                              if (atomic_read(mfc))
 *                                      call ftrace_int3_handler()
 *
 * Then when we are finished:
 *
 *  atomic_dec(mfc);
 *
 * If we hit a breakpoint that was not set by ftrace, it does not
 * matter if ftrace_int3_handler() is called or not. It will
 * simply be ignored. But it is crucial that a ftrace nop/caller
 * breakpoint is handled. No other user should ever place a
 * breakpoint on an ftrace nop/caller location. It must only
 * be done by this code.
 */
atomic_t modifying_ftrace_code __read_mostly;

static int
ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
                   unsigned const char *new_code);

/*
 * Should never be called:
 *  As it is only called by __ftrace_replace_code() which is called by
 *  ftrace_replace_code() that x86 overrides, and by ftrace_update_code()
 *  which is called to turn mcount into nops or nops into function calls
 *  but not to convert a function from not using regs to one that uses
 *  regs, which ftrace_modify_call() is for.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                       unsigned long addr)
{
        WARN_ON(1);
        return -EINVAL;
}

static unsigned long ftrace_update_func;

static int update_ftrace_func(unsigned long ip, void *new)
{
        unsigned char old[MCOUNT_INSN_SIZE];
        int ret;

        memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);

        ftrace_update_func = ip;
        /* Make sure the breakpoints see the ftrace_update_func update */
        smp_wmb();

        /* See comment above by declaration of modifying_ftrace_code */
        atomic_inc(&modifying_ftrace_code);

        ret = ftrace_modify_code(ip, old, new);

        atomic_dec(&modifying_ftrace_code);

        return ret;
}

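/*
 * ftrace_call and ftrace_regs_call (below) are labels on the patchable
 * call instructions inside the ftrace_caller and ftrace_regs_caller
 * assembly trampolines; rewriting those sites redirects every traced
 * function to the new callback.
 */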
int ftrace_update_ftrace_func(ftrace_func_t func)
{
        unsigned long ip = (unsigned long)(&ftrace_call);
        unsigned char *new;
        int ret;

        new = ftrace_call_replace(ip, (unsigned long)func);
        ret = update_ftrace_func(ip, new);

        /* Also update the regs callback function */
        if (!ret) {
                ip = (unsigned long)(&ftrace_regs_call);
                new = ftrace_call_replace(ip, (unsigned long)func);
                ret = update_ftrace_func(ip, new);
        }

        return ret;
}

static int is_ftrace_caller(unsigned long ip)
{
        if (ip == ftrace_update_func)
                return 1;

        return 0;
}

/*
 * A breakpoint was added to the code address we are about to
 * modify, and this is the handler that will just skip over it.
 * We are either changing a nop into a trace call, or a trace
 * call to a nop. While the change is taking place, we treat
 * it just like it was a nop.
 */
int ftrace_int3_handler(struct pt_regs *regs)
{
        unsigned long ip;

        if (WARN_ON_ONCE(!regs))
                return 0;

        /* the trap leaves regs->ip just past the one-byte int3 */
        ip = regs->ip - 1;
        if (!ftrace_location(ip) && !is_ftrace_caller(ip))
                return 0;

        /* skip over the rest of the five-byte instruction */
        regs->ip += MCOUNT_INSN_SIZE - 1;

        return 1;
}

static int ftrace_write(unsigned long ip, const char *val, int size)
{
        /*
         * On x86_64, kernel text mappings are mapped read-only with
         * CONFIG_DEBUG_RODATA. So we use the kernel identity mapping
         * instead of the kernel text mapping to modify the kernel text.
         *
         * For 32bit kernels, these mappings are the same and we can use
         * the kernel identity mapping to modify code.
         */
        if (within(ip, (unsigned long)_text, (unsigned long)_etext))
                ip = (unsigned long)__va(__pa_symbol(ip));

        return probe_kernel_write((void *)ip, val, size);
}

static int add_break(unsigned long ip, const char *old)
{
        unsigned char replaced[MCOUNT_INSN_SIZE];
        unsigned char brk = BREAKPOINT_INSTRUCTION;

        if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
                return -EFAULT;

        /* Make sure it is what we expect it to be */
        if (memcmp(replaced, old, MCOUNT_INSN_SIZE) != 0)
                return -EINVAL;

        /* overwrite just the first byte with the int3 breakpoint */
        if (ftrace_write(ip, &brk, 1))
                return -EPERM;

        return 0;
}

static int add_brk_on_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned const char *old;
        unsigned long ip = rec->ip;

        old = ftrace_call_replace(ip, addr);

        return add_break(rec->ip, old);
}

static int add_brk_on_nop(struct dyn_ftrace *rec)
{
        unsigned const char *old;

        old = ftrace_nop_replace();

        return add_break(rec->ip, old);
}

/*
 * If the record has the FTRACE_FL_REGS set, that means that it
 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
 * is not set, then it wants to convert to the normal callback.
 */
static unsigned long get_ftrace_addr(struct dyn_ftrace *rec)
{
        if (rec->flags & FTRACE_FL_REGS)
                return (unsigned long)FTRACE_REGS_ADDR;
        else
                return (unsigned long)FTRACE_ADDR;
}

/*
 * The FTRACE_FL_REGS_EN is set when the record already points to
 * a function that saves all the regs. Basically the '_EN' version
 * represents the current state of the function.
 */
static unsigned long get_ftrace_old_addr(struct dyn_ftrace *rec)
{
        if (rec->flags & FTRACE_FL_REGS_EN)
                return (unsigned long)FTRACE_REGS_ADDR;
        else
                return (unsigned long)FTRACE_ADDR;
}

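/*
 * For example, a record being converted from the regs-saving caller
 * back to the normal one has FTRACE_FL_REGS clear but FTRACE_FL_REGS_EN
 * still set: get_ftrace_addr() returns FTRACE_ADDR (the wanted state)
 * while get_ftrace_old_addr() returns FTRACE_REGS_ADDR (the current one).
 */
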
static int add_breakpoints(struct dyn_ftrace *rec, int enable)
{
        unsigned long ftrace_addr;
        int ret;

        ret = ftrace_test_record(rec, enable);

        ftrace_addr = get_ftrace_addr(rec);

        switch (ret) {
        case FTRACE_UPDATE_IGNORE:
                return 0;

        case FTRACE_UPDATE_MAKE_CALL:
                /* converting nop to call */
                return add_brk_on_nop(rec);

        case FTRACE_UPDATE_MODIFY_CALL_REGS:
        case FTRACE_UPDATE_MODIFY_CALL:
                ftrace_addr = get_ftrace_old_addr(rec);
                /* fall through */
        case FTRACE_UPDATE_MAKE_NOP:
                /* converting a call to a nop */
                return add_brk_on_call(rec, ftrace_addr);
        }
        return 0;
}

/*
 * On error, we need to remove breakpoints. This needs to
 * be done carefully. If the address does not currently have a
 * breakpoint, we know we are done. Otherwise, we look at the
 * remaining 4 bytes of the instruction. If it matches a nop
 * we replace the breakpoint with the nop. Otherwise we replace
 * it with the call instruction.
 */
static int remove_breakpoint(struct dyn_ftrace *rec)
{
        unsigned char ins[MCOUNT_INSN_SIZE];
        unsigned char brk = BREAKPOINT_INSTRUCTION;
        const unsigned char *nop;
        unsigned long ftrace_addr;
        unsigned long ip = rec->ip;

        /* If we fail the read, just give up */
        if (probe_kernel_read(ins, (void *)ip, MCOUNT_INSN_SIZE))
                return -EFAULT;

        /* If this does not have a breakpoint, we are done */
        if (ins[0] != brk)
                return -1;

        nop = ftrace_nop_replace();

        /*
         * If the last 4 bytes of the instruction do not match
         * a nop, then we assume that this is a call to ftrace_addr.
         */
        if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0) {
                /*
                 * For extra paranoia, we check if the breakpoint is on
                 * a call that would actually jump to the ftrace_addr.
                 * If not, don't touch the breakpoint, or we may just
                 * create a disaster.
                 */
                ftrace_addr = get_ftrace_addr(rec);
                nop = ftrace_call_replace(ip, ftrace_addr);

                if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) == 0)
                        goto update;

                /* Check both ftrace_addr and ftrace_old_addr */
                ftrace_addr = get_ftrace_old_addr(rec);
                nop = ftrace_call_replace(ip, ftrace_addr);

                if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0)
                        return -EINVAL;
        }

 update:
        return probe_kernel_write((void *)ip, &nop[0], 1);
}

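/*
 * While an update is in flight, the five bytes of a site pass through
 * these states (cc = int3, oo = old displacement, nn = new one):
 *
 *   initial:  e8 oo oo oo oo    (or the 5-byte nop)
 *   step 1:   cc oo oo oo oo    add_break()
 *   step 2:   cc nn nn nn nn    add_update_code()
 *   step 3:   e8 nn nn nn nn    finish_update_call()/finish_update_nop()
 */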
static int add_update_code(unsigned long ip, unsigned const char *new)
{
        /* skip breakpoint */
        ip++;
        new++;
        if (ftrace_write(ip, new, MCOUNT_INSN_SIZE - 1))
                return -EPERM;
        return 0;
}

static int add_update_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned long ip = rec->ip;
        unsigned const char *new;

        new = ftrace_call_replace(ip, addr);
        return add_update_code(ip, new);
}

static int add_update_nop(struct dyn_ftrace *rec)
{
        unsigned long ip = rec->ip;
        unsigned const char *new;

        new = ftrace_nop_replace();
        return add_update_code(ip, new);
}

static int add_update(struct dyn_ftrace *rec, int enable)
{
        unsigned long ftrace_addr;
        int ret;

        ret = ftrace_test_record(rec, enable);

        ftrace_addr = get_ftrace_addr(rec);

        switch (ret) {
        case FTRACE_UPDATE_IGNORE:
                return 0;

        case FTRACE_UPDATE_MODIFY_CALL_REGS:
        case FTRACE_UPDATE_MODIFY_CALL:
        case FTRACE_UPDATE_MAKE_CALL:
                /* converting nop to call */
                return add_update_call(rec, ftrace_addr);

        case FTRACE_UPDATE_MAKE_NOP:
                /* converting a call to a nop */
                return add_update_nop(rec);
        }

        return 0;
}

static int finish_update_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned long ip = rec->ip;
        unsigned const char *new;

        new = ftrace_call_replace(ip, addr);

        if (ftrace_write(ip, new, 1))
                return -EPERM;

        return 0;
}

static int finish_update_nop(struct dyn_ftrace *rec)
{
        unsigned long ip = rec->ip;
        unsigned const char *new;

        new = ftrace_nop_replace();

        if (ftrace_write(ip, new, 1))
                return -EPERM;
        return 0;
}

static int finish_update(struct dyn_ftrace *rec, int enable)
{
        unsigned long ftrace_addr;
        int ret;

        ret = ftrace_update_record(rec, enable);

        ftrace_addr = get_ftrace_addr(rec);

        switch (ret) {
        case FTRACE_UPDATE_IGNORE:
                return 0;

        case FTRACE_UPDATE_MODIFY_CALL_REGS:
        case FTRACE_UPDATE_MODIFY_CALL:
        case FTRACE_UPDATE_MAKE_CALL:
                /* converting nop to call */
                return finish_update_call(rec, ftrace_addr);

        case FTRACE_UPDATE_MAKE_NOP:
                /* converting a call to a nop */
                return finish_update_nop(rec);
        }

        return 0;
}

static void do_sync_core(void *data)
{
        sync_core();
}

static void run_sync(void)
{
        int enable_irqs = irqs_disabled();

        /* We may be called with interrupts disabled (on bootup). */
        if (enable_irqs)
                local_irq_enable();
        on_each_cpu(do_sync_core, NULL, 1);
        if (enable_irqs)
                local_irq_disable();
}

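/*
 * The batch update below runs the int3 dance over every record:
 * add_breakpoints() plants an int3 on each site, add_update() writes
 * the new tail bytes behind it, and finish_update() restores the first
 * byte, with run_sync() serializing every CPU between the steps. A CPU
 * that hits a site mid-update traps into ftrace_int3_handler() and
 * simply skips the instruction.
 */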
void ftrace_replace_code(int enable)
{
        struct ftrace_rec_iter *iter;
        struct dyn_ftrace *rec;
        const char *report = "adding breakpoints";
        int count = 0;
        int ret;

        for_ftrace_rec_iter(iter) {
                rec = ftrace_rec_iter_record(iter);

                ret = add_breakpoints(rec, enable);
                if (ret)
                        goto remove_breakpoints;
                count++;
        }

        run_sync();

        report = "updating code";

        for_ftrace_rec_iter(iter) {
                rec = ftrace_rec_iter_record(iter);

                ret = add_update(rec, enable);
                if (ret)
                        goto remove_breakpoints;
        }

        run_sync();

        report = "removing breakpoints";

        for_ftrace_rec_iter(iter) {
                rec = ftrace_rec_iter_record(iter);

                ret = finish_update(rec, enable);
                if (ret)
                        goto remove_breakpoints;
        }

        run_sync();

        return;

 remove_breakpoints:
        ftrace_bug(ret, rec ? rec->ip : 0);
        printk(KERN_WARNING "Failed on %s (%d):\n", report, count);
        for_ftrace_rec_iter(iter) {
                rec = ftrace_rec_iter_record(iter);
                remove_breakpoint(rec);
        }
}

static int
ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
                   unsigned const char *new_code)
{
        int ret;

        ret = add_break(ip, old_code);
        if (ret)
                goto out;

        run_sync();

        ret = add_update_code(ip, new_code);
        if (ret)
                goto fail_update;

        run_sync();

        ret = ftrace_write(ip, new_code, 1);
        if (ret) {
                ret = -EPERM;
                goto out;
        }
        run_sync();
 out:
        return ret;

 fail_update:
        probe_kernel_write((void *)ip, &old_code[0], 1);
        goto out;
}

void arch_ftrace_update_code(int command)
{
        /* See comment above by declaration of modifying_ftrace_code */
        atomic_inc(&modifying_ftrace_code);

        ftrace_modify_all_code(command);

        atomic_dec(&modifying_ftrace_code);
}

int __init ftrace_dyn_arch_init(void *data)
{
        /* The return code is returned via data */
        *(unsigned long *)data = 0;

        return 0;
}

#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);

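/*
 * Same 5-byte shape as ftrace_call_replace(), but encoded with the
 * 0xe9 (jmp rel32) opcode instead of 0xe8 (call rel32), so control
 * transfers without pushing a return address.
 */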
static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
{
        static union ftrace_code_union calc;

        /* Jmp, not a call (ignore the .e8 field name) */
        calc.e8 = 0xe9;
        calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);

        /*
         * ftrace external locks synchronize the access to the static variable.
         */
        return calc.code;
}

static int ftrace_mod_jmp(unsigned long ip, void *func)
{
        unsigned char *new;

        new = ftrace_jmp_replace(ip, (unsigned long)func);

        return update_ftrace_func(ip, new);
}

int ftrace_enable_ftrace_graph_caller(void)
{
        unsigned long ip = (unsigned long)(&ftrace_graph_call);

        return ftrace_mod_jmp(ip, &ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
        unsigned long ip = (unsigned long)(&ftrace_graph_call);

        return ftrace_mod_jmp(ip, &ftrace_stub);
}

#endif /* !CONFIG_DYNAMIC_FTRACE */

/*
 * Hook the return address and push it onto the stack of return addrs
 * in the current thread info.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
                           unsigned long frame_pointer)
{
        unsigned long old;
        int faulted;
        struct ftrace_graph_ent trace;
        unsigned long return_hooker = (unsigned long)
                                &return_to_handler;

        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                return;

        /*
         * Protect against a fault, even if it shouldn't
         * happen. This tool is too intrusive to
         * ignore such a protection.
         */
        asm volatile(
                "1: " _ASM_MOV " (%[parent]), %[old]\n"
                "2: " _ASM_MOV " %[return_hooker], (%[parent])\n"
                "   movl $0, %[faulted]\n"
                "3:\n"

                ".section .fixup, \"ax\"\n"
                "4: movl $1, %[faulted]\n"
                "   jmp 3b\n"
                ".previous\n"

                _ASM_EXTABLE(1b, 4b)
                _ASM_EXTABLE(2b, 4b)

                : [old] "=&r" (old), [faulted] "=r" (faulted)
                : [parent] "r" (parent), [return_hooker] "r" (return_hooker)
                : "memory"
        );

        if (unlikely(faulted)) {
                ftrace_graph_stop();
                WARN_ON(1);
                return;
        }

        trace.func = self_addr;
        trace.depth = current->curr_ret_stack + 1;

        /* Only trace if the calling function expects to */
        if (!ftrace_graph_entry(&trace)) {
                *parent = old;
                return;
        }

        if (ftrace_push_return_trace(old, self_addr, &trace.depth,
                                     frame_pointer) == -EBUSY) {
                *parent = old;
                return;
        }
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */