/*
 *  Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Nov	Ananth N Mavinakayanahalli <ananth@in.ibm.com> kprobes port
 *		for ppc64.
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/slab.h>
#include <asm/code-patching.h>
#include <asm/cacheflush.h>
#include <asm/sstep.h>
#include <asm/sections.h>
#include <linux/uaccess.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};

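/*
 * Returns 1 if addr is the address of the kprobe currently being
 * handled on this CPU, 0 otherwise.
 */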
int is_current_kprobe_addr(unsigned long addr)
{
	struct kprobe *p = kprobe_running();
	return (p && (unsigned long)p->addr == addr) ? 1 : 0;
}

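/*
 * Addresses within the kprobes text section and within the early boot
 * text (_stext up to __head_end) must not be probed.
 */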
bool arch_within_kprobe_blacklist(unsigned long addr)
{
	return  (addr >= (unsigned long)__kprobes_text_start &&
		 addr < (unsigned long)__kprobes_text_end) ||
		(addr >= (unsigned long)_stext &&
		 addr < (unsigned long)__head_end);
}

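/*
 * Resolve a symbol name to the address a probe should be placed at,
 * accounting for the function entry conventions of each powerpc ABI.
 */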
kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset)
{
	kprobe_opcode_t *addr;

#ifdef PPC64_ELF_ABI_v2
	/* PPC64 ABIv2 needs local entry point */
	addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
	if (addr && !offset) {
#ifdef CONFIG_KPROBES_ON_FTRACE
		unsigned long faddr;

		/*
		 * Per livepatch.h, ftrace location is always within the first
		 * 16 bytes of a function on powerpc with -mprofile-kernel.
		 */
		faddr = ftrace_location_range((unsigned long)addr,
					      (unsigned long)addr + 16);
		if (faddr)
			addr = (kprobe_opcode_t *)faddr;
		else
#endif
			addr = (kprobe_opcode_t *)ppc_function_entry(addr);
	}
#elif defined(PPC64_ELF_ABI_v1)
	/*
	 * 64bit powerpc ABIv1 uses function descriptors:
	 * - Check for the dot variant of the symbol first.
	 * - If that fails, try looking up the symbol provided.
	 *
	 * This ensures we always get to the actual symbol and not
	 * the descriptor.
	 *
	 * Also handle <module:symbol> format.
	 */
	char dot_name[MODULE_NAME_LEN + 1 + KSYM_NAME_LEN];
	const char *modsym;
	bool dot_appended = false;

	if ((modsym = strchr(name, ':')) != NULL) {
		modsym++;
		if (*modsym != '\0' && *modsym != '.') {
			/* Convert to <module:.symbol> */
			strncpy(dot_name, name, modsym - name);
			dot_name[modsym - name] = '.';
			dot_name[modsym - name + 1] = '\0';
			strncat(dot_name, modsym,
				sizeof(dot_name) - (modsym - name) - 2);
			dot_appended = true;
		} else {
			dot_name[0] = '\0';
			strncat(dot_name, name, sizeof(dot_name) - 1);
		}
	} else if (name[0] != '.') {
		dot_name[0] = '.';
		dot_name[1] = '\0';
		strncat(dot_name, name, KSYM_NAME_LEN - 2);
		dot_appended = true;
	} else {
		dot_name[0] = '\0';
		strncat(dot_name, name, KSYM_NAME_LEN - 1);
	}
	addr = (kprobe_opcode_t *)kallsyms_lookup_name(dot_name);
	if (!addr && dot_appended) {
		/* Let's try the original non-dot symbol lookup */
		addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
	}
#else
	addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
#endif

	return addr;
}

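/*
 * Validate the probe address and set up the out-of-line single-step
 * buffer. Probes must be word-aligned and must not sit on mtmsr[d]
 * or rfi[d], since those cannot be safely single-stepped or emulated.
 */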
int arch_prepare_kprobe(struct kprobe *p)
{
	int ret = 0;
	kprobe_opcode_t insn = *p->addr;

	if ((unsigned long)p->addr & 0x03) {
		printk("Attempt to register kprobe at an unaligned address\n");
		ret = -EINVAL;
	} else if (IS_MTMSRD(insn) || IS_RFID(insn) || IS_RFI(insn)) {
		printk("Cannot register a kprobe on rfi/rfid or mtmsr[d]\n");
		ret = -EINVAL;
	}

	/* insn must be on a special executable page on ppc64.  This is
	 * not explicitly required on ppc32 (right now), but it doesn't hurt */
	if (!ret) {
		p->ainsn.insn = get_insn_slot();
		if (!p->ainsn.insn)
			ret = -ENOMEM;
	}

	if (!ret) {
		memcpy(p->ainsn.insn, p->addr,
				MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
		p->opcode = *p->addr;
		flush_icache_range((unsigned long)p->ainsn.insn,
			(unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t));
	}

	p->ainsn.boostable = 0;
	return ret;
}
NOKPROBE_SYMBOL(arch_prepare_kprobe);

void arch_arm_kprobe(struct kprobe *p)
{
	patch_instruction(p->addr, BREAKPOINT_INSTRUCTION);
}
NOKPROBE_SYMBOL(arch_arm_kprobe);

void arch_disarm_kprobe(struct kprobe *p)
{
	patch_instruction(p->addr, p->opcode);
}
NOKPROBE_SYMBOL(arch_disarm_kprobe);

void arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn, 0);
		p->ainsn.insn = NULL;
	}
}
NOKPROBE_SYMBOL(arch_remove_kprobe);

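/*
 * Point nip at the copied instruction and enable the hardware
 * single-step facility so that exactly one instruction executes.
 */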
static nokprobe_inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	enable_single_step(regs);

	/*
	 * On powerpc we should single step on the original
	 * instruction even if the probed insn is a trap
	 * variant as values in regs could play a part in
	 * if the trap is taken or not
	 */
	regs->nip = (unsigned long)p->ainsn.insn;
}

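/*
 * Reentrancy bookkeeping: when a probe is hit while another is being
 * handled, the active kprobe state is stashed here and restored once
 * the nested probe has been single-stepped.
 */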
static nokprobe_inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.saved_msr = kcb->kprobe_saved_msr;
}

static nokprobe_inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr;
}

static nokprobe_inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
				struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, p);
	kcb->kprobe_saved_msr = regs->msr;
}

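/*
 * Decide whether an offset from a function's entry still counts as
 * being "on" function entry; the allowed range depends on the ABI and
 * on whether the ftrace location is in play.
 */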
bool arch_kprobe_on_func_entry(unsigned long offset)
{
#ifdef PPC64_ELF_ABI_v2
#ifdef CONFIG_KPROBES_ON_FTRACE
	return offset <= 16;
#else
	return offset <= 8;
#endif
#else
	return !offset;
#endif
}

void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->link;

	/* Replace the return addr with trampoline addr */
	regs->link = (unsigned long)kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_prepare_kretprobe);

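/*
 * Try to emulate the probed instruction in the kernel ("boosting"),
 * avoiding a single-step trap: returns > 0 if emulated, 0 if it must
 * be single-stepped, < 0 on unexpected failure.
 */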
int try_to_emulate(struct kprobe *p, struct pt_regs *regs)
{
	int ret;
	unsigned int insn = *p->ainsn.insn;

	/* regs->nip is also adjusted if emulate_step returns 1 */
	ret = emulate_step(regs, insn);
	if (ret > 0) {
		/*
		 * Once this instruction has been boosted
		 * successfully, set the boostable flag
		 */
		if (unlikely(p->ainsn.boostable == 0))
			p->ainsn.boostable = 1;
	} else if (ret < 0) {
		/*
		 * We don't allow kprobes on mtmsr(d)/rfi(d), etc.
		 * So, we should never get here... but, it's still
		 * good to catch them, just in case...
		 */
		printk("Can't step on instruction %x\n", insn);
		BUG();
	} else if (ret == 0)
		/* This instruction can't be boosted */
		p->ainsn.boostable = -1;

	return ret;
}
NOKPROBE_SYMBOL(try_to_emulate);

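/*
 * Main breakpoint handler: sort out whose breakpoint this is, handle
 * reentrant hits, run the pre-handler, and then either emulate the
 * probed instruction or set up a single step over it.
 */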
int kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	unsigned int *addr = (unsigned int *)regs->nip;
	struct kprobe_ctlblk *kcb;

	if (user_mode(regs))
		return 0;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			kprobe_opcode_t insn = *p->ainsn.insn;
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
					is_trap(insn)) {
				/* Turn off 'trace' bits */
				regs->msr &= ~MSR_SINGLESTEP;
				regs->msr |= kcb->kprobe_saved_msr;
				goto no_kprobe;
			}
			/* We have reentered the kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * We here save the original kprobes variables and
			 * just single step on the instruction of the new probe
			 * without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kprobes_inc_nmissed_count(p);
			kcb->kprobe_status = KPROBE_REENTER;
			if (p->ainsn.boostable >= 0) {
				ret = try_to_emulate(p, regs);

				if (ret > 0) {
					restore_previous_kprobe(kcb);
					preempt_enable_no_resched();
					return 1;
				}
			}
			prepare_singlestep(p, regs);
			return 1;
		} else {
			if (*addr != BREAKPOINT_INSTRUCTION) {
				/* If trap variant, then it belongs not to us */
				kprobe_opcode_t cur_insn = *addr;

				if (is_trap(cur_insn))
					goto no_kprobe;
				/* The breakpoint instruction was removed by
				 * another cpu right after we hit, no further
				 * handling of this interrupt is appropriate
				 */
				ret = 1;
				goto no_kprobe;
			}
			p = __this_cpu_read(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs)) {
				if (!skip_singlestep(p, regs, kcb))
					goto ss_probe;
				ret = 1;
			}
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (*addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * PowerPC has multiple variants of the "trap"
			 * instruction. If the current instruction is a
			 * trap variant, it could belong to someone else
			 */
			kprobe_opcode_t cur_insn = *addr;

			if (is_trap(cur_insn))
				goto no_kprobe;
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it. Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address. In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}

	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
	set_current_kprobe(p, regs, kcb);
	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
	if (p->ainsn.boostable >= 0) {
		ret = try_to_emulate(p, regs);

		if (ret > 0) {
			if (p->post_handler)
				p->post_handler(p, regs, 0);

			kcb->kprobe_status = KPROBE_HIT_SSDONE;
			reset_current_kprobe();
			preempt_enable_no_resched();
			return 1;
		}
	}
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}
NOKPROBE_SYMBOL(kprobe_handler);

/*
 * Function return probe trampoline:
 *	- init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe
 *		causes the handlers to fire
 */
asm(".global kretprobe_trampoline\n"
	".type kretprobe_trampoline, @function\n"
	"kretprobe_trampoline:\n"
	"nop\n"
	"blr\n"
	".size kretprobe_trampoline, .-kretprobe_trampoline\n");

/*
 * Called when the probe at kretprobe trampoline is hit
 */
static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * a return probe installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);
	regs->nip = orig_ret_address;
	/*
	 * Make LR point to the orig_ret_address.
	 * When the 'nop' inside the kretprobe_trampoline
	 * is optimized, we can do a 'blr' after executing the
	 * detour buffer code.
	 */
	regs->link = orig_ret_address;

	reset_current_kprobe();
	kretprobe_hash_unlock(current, &flags);
	preempt_enable_no_resched();

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}
NOKPROBE_SYMBOL(trampoline_probe_handler);

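#if 0
/*
 * Illustrative sketch only, not part of the original file: a minimal
 * module-side kretprobe whose return path goes through the trampoline
 * above. The names my_ret_handler/my_kretprobe and the probed symbol
 * are hypothetical; error handling is elided.
 */
static int my_ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	/* On powerpc, gpr[3] holds the probed function's return value */
	pr_info("probed function returned 0x%lx\n", regs->gpr[3]);
	return 0;
}

static struct kretprobe my_kretprobe = {
	.handler	= my_ret_handler,
	.kp.symbol_name	= "do_fork",	/* hypothetical target */
};

/*
 * register_kretprobe(&my_kretprobe) arms the probe; on return from the
 * target, trampoline_probe_handler() above runs my_ret_handler.
 * unregister_kretprobe(&my_kretprobe) removes it.
 */
#endif
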
/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "breakpoint"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 */
int kprobe_post_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur || user_mode(regs))
		return 0;

	/* make sure we got here for instruction we have a kprobe on */
	if (((unsigned long)cur->ainsn.insn + 4) != regs->nip)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	/* Adjust nip to after the single-stepped instruction */
	regs->nip = (unsigned long)cur->addr + 4;
	regs->msr |= kcb->kprobe_saved_msr;

	/* Restore back the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * if somebody else is singlestepping across a probe point, msr
	 * will have DE/SE set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (regs->msr & MSR_SINGLESTEP)
		return 0;

	return 1;
}
NOKPROBE_SYMBOL(kprobe_post_handler);

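/*
 * Sort out page faults that occur while a probe is active: either the
 * single-stepped copy of the instruction faulted, or a user-supplied
 * pre/post handler did.
 */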
int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	const struct exception_table_entry *entry;

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe and the nip points back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->nip = (unsigned long)cur->addr;
		regs->msr &= ~MSR_SINGLESTEP; /* Turn off 'trace' bits */
		regs->msr |= kcb->kprobe_saved_msr;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting,
		 * we can also use npre/npostfault count for accounting
		 * these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page_fault, this could happen
		 * if handler tries to access user space by
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if ((entry = search_exception_tables(regs->nip)) != NULL) {
			regs->nip = extable_fixup(entry);
			return 1;
		}

		/*
		 * fixup_exception() could not handle it,
		 * Let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);

unsigned long arch_deref_entry_point(void *entry)
{
#ifdef PPC64_ELF_ABI_v1
	if (!kernel_text_address((unsigned long)entry))
		return ppc_global_function_entry(entry);
	else
#endif
		return (unsigned long)entry;
}
NOKPROBE_SYMBOL(arch_deref_entry_point);

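/*
 * jprobes: divert execution to the jprobe handler, which runs with the
 * probed function's register state, then longjmp back via the trap in
 * jprobe_return().
 */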
int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));

	/* setup return addr to the jprobe handler routine */
	regs->nip = arch_deref_entry_point(jp->entry);
#ifdef PPC64_ELF_ABI_v2
	regs->gpr[12] = (unsigned long)jp->entry;
#elif defined(PPC64_ELF_ABI_v1)
	regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
#endif

	/*
	 * jprobes use jprobe_return() which skips the normal return
	 * path of the function, and this messes up the accounting of the
	 * function graph tracer.
	 *
	 * Pause function graph tracing while performing the jprobe function.
	 */
	pause_graph_tracing();

	return 1;
}
NOKPROBE_SYMBOL(setjmp_pre_handler);

void __used jprobe_return(void)
{
	asm volatile("trap" ::: "memory");
}
NOKPROBE_SYMBOL(jprobe_return);

static void __used jprobe_return_end(void)
{
}
NOKPROBE_SYMBOL(jprobe_return_end);

int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	/*
	 * FIXME - we should ideally be validating that we got here 'cos
	 * of the "trap" in jprobe_return() above, before restoring the
	 * saved regs...
	 */
	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
	/* It's OK to start function graph tracing again */
	unpause_graph_tracing();
	preempt_enable_no_resched();
	return 1;
}
NOKPROBE_SYMBOL(longjmp_break_handler);

static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}

int arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
		return 1;

	return 0;
}
NOKPROBE_SYMBOL(arch_trampoline_kprobe);