// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Kernel Probes (KProbes)
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Nov	Ananth N Mavinakayanahalli <ananth@in.ibm.com> kprobes port
 *		for PPC64
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/slab.h>
#include <asm/code-patching.h>
#include <asm/cacheflush.h>
#include <asm/sstep.h>
#include <asm/sections.h>
#include <linux/uaccess.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};

bool arch_within_kprobe_blacklist(unsigned long addr)
{
        return  (addr >= (unsigned long)__kprobes_text_start &&
                 addr < (unsigned long)__kprobes_text_end) ||
                (addr >= (unsigned long)_stext &&
                 addr < (unsigned long)__head_end);
}

kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset)
{
        kprobe_opcode_t *addr = NULL;

#ifdef PPC64_ELF_ABI_v2
        /* PPC64 ABIv2 needs local entry point */
        addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
        if (addr && !offset) {
#ifdef CONFIG_KPROBES_ON_FTRACE
                unsigned long faddr;

                /*
                 * Per livepatch.h, ftrace location is always within the first
                 * 16 bytes of a function on powerpc with -mprofile-kernel.
                 */
                faddr = ftrace_location_range((unsigned long)addr,
                                              (unsigned long)addr + 16);
                if (faddr)
                        addr = (kprobe_opcode_t *)faddr;
                else
#endif
                        addr = (kprobe_opcode_t *)ppc_function_entry(addr);
        }
#elif defined(PPC64_ELF_ABI_v1)
        /*
         * 64bit powerpc ABIv1 uses function descriptors:
         * - Check for the dot variant of the symbol first.
         * - If that fails, try looking up the symbol provided.
         *
         * This ensures we always get to the actual symbol and not
         * the descriptor.
         *
         * Also handle <module:symbol> format.
         */
        char dot_name[MODULE_NAME_LEN + 1 + KSYM_NAME_LEN];
        bool dot_appended = false;
        const char *c;
        ssize_t ret = 0;
        int len = 0;

        if ((c = strnchr(name, MODULE_NAME_LEN, ':')) != NULL) {
                c++;
                len = c - name;
                memcpy(dot_name, name, len);
        } else
                c = name;

        if (*c != '\0' && *c != '.') {
                dot_name[len++] = '.';
                dot_appended = true;
        }
        ret = strscpy(dot_name + len, c, KSYM_NAME_LEN);
        if (ret > 0)
                addr = (kprobe_opcode_t *)kallsyms_lookup_name(dot_name);

        /* Fallback to the original non-dot symbol lookup */
        if (!addr && dot_appended)
                addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
#else
        addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
#endif

        return addr;
}
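
/*
 * Example (added commentary, not in the original source): on ABIv1,
 * kprobe_lookup_name("vfs_read", 0) tries the dot symbol ".vfs_read"
 * first, so the returned address is the function's text rather than
 * its descriptor; a "<module:symbol>" name such as "ext4:ext4_file_open"
 * gets the dot inserted after the ':' (i.e. "ext4:.ext4_file_open").
 */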

int arch_prepare_kprobe(struct kprobe *p)
{
        int ret = 0;
        kprobe_opcode_t insn = *p->addr;

        if ((unsigned long)p->addr & 0x03) {
                printk("Attempt to register kprobe at an unaligned address\n");
                ret = -EINVAL;
        } else if (IS_MTMSRD(insn) || IS_RFID(insn) || IS_RFI(insn)) {
                printk("Cannot register a kprobe on rfi/rfid or mtmsr[d]\n");
                ret = -EINVAL;
        }

        /* insn must be on a special executable page on ppc64. This is
         * not explicitly required on ppc32 (right now), but it doesn't hurt */
        if (!ret) {
                p->ainsn.insn = get_insn_slot();
                if (!p->ainsn.insn)
                        ret = -ENOMEM;
        }

        if (!ret) {
                memcpy(p->ainsn.insn, p->addr,
                                MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
                p->opcode = *p->addr;
                flush_icache_range((unsigned long)p->ainsn.insn,
                        (unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t));
        }

        p->ainsn.boostable = 0;
        return ret;
}
NOKPROBE_SYMBOL(arch_prepare_kprobe);
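
/*
 * Usage sketch (added commentary; the probe target and handler are
 * hypothetical): the generic kprobes API drives the arch hooks in this
 * file -- register_kprobe() ends up calling arch_prepare_kprobe()
 * above and arch_arm_kprobe() below:
 *
 *	static int my_pre(struct kprobe *kp, struct pt_regs *regs)
 *	{
 *		pr_info("pre-handler hit at %pS\n", (void *)regs->nip);
 *		return 0;	// 0: proceed with single-step/emulation
 *	}
 *
 *	static struct kprobe kp = {
 *		.symbol_name	= "vfs_read",	// any non-blacklisted symbol
 *		.pre_handler	= my_pre,
 *	};
 *	// in module init: ret = register_kprobe(&kp);
 */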

void arch_arm_kprobe(struct kprobe *p)
{
        patch_instruction(p->addr, BREAKPOINT_INSTRUCTION);
}
NOKPROBE_SYMBOL(arch_arm_kprobe);

void arch_disarm_kprobe(struct kprobe *p)
{
        patch_instruction(p->addr, p->opcode);
}
NOKPROBE_SYMBOL(arch_disarm_kprobe);

void arch_remove_kprobe(struct kprobe *p)
{
        if (p->ainsn.insn) {
                free_insn_slot(p->ainsn.insn, 0);
                p->ainsn.insn = NULL;
        }
}
NOKPROBE_SYMBOL(arch_remove_kprobe);

static nokprobe_inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
        enable_single_step(regs);

        /*
         * On powerpc we should single step on the original
         * instruction even if the probed insn is a trap
         * variant as values in regs could play a part in
         * whether the trap is taken or not
         */
        regs->nip = (unsigned long)p->ainsn.insn;
}

static nokprobe_inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        kcb->prev_kprobe.kp = kprobe_running();
        kcb->prev_kprobe.status = kcb->kprobe_status;
        kcb->prev_kprobe.saved_msr = kcb->kprobe_saved_msr;
}

static nokprobe_inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
        kcb->kprobe_status = kcb->prev_kprobe.status;
        kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr;
}

static nokprobe_inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
                                struct kprobe_ctlblk *kcb)
{
        __this_cpu_write(current_kprobe, p);
        kcb->kprobe_saved_msr = regs->msr;
}

bool arch_kprobe_on_func_entry(unsigned long offset)
{
#ifdef PPC64_ELF_ABI_v2
#ifdef CONFIG_KPROBES_ON_FTRACE
        return offset <= 16;
#else
        return offset <= 8;
#endif
#else
        return !offset;
#endif
}
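
/*
 * Note (added commentary): under ELFv2 a function's local entry point
 * normally sits 8 bytes past its global entry point, and with
 * -mprofile-kernel the ftrace location lies within the first 16 bytes
 * (see kprobe_lookup_name() above), which is where the offset bounds
 * used here come from.
 */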

void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
{
        ri->ret_addr = (kprobe_opcode_t *)regs->link;

        /* Replace the return addr with trampoline addr */
        regs->link = (unsigned long)kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_prepare_kretprobe);

static int try_to_emulate(struct kprobe *p, struct pt_regs *regs)
{
        int ret;
        unsigned int insn = *p->ainsn.insn;

        /* regs->nip is also adjusted if emulate_step returns 1 */
        ret = emulate_step(regs, insn);
        if (ret > 0) {
                /*
                 * Once this instruction has been boosted
                 * successfully, set the boostable flag
                 */
                if (unlikely(p->ainsn.boostable == 0))
                        p->ainsn.boostable = 1;
        } else if (ret < 0) {
                /*
                 * We don't allow kprobes on mtmsr(d)/rfi(d), etc.
                 * So, we should never get here... but, it's still
                 * good to catch them, just in case...
                 */
                printk("Can't step on instruction %x\n", insn);
                BUG();
        } else {
                /*
                 * If we haven't previously emulated this instruction, then it
                 * can't be boosted. Note it down so we don't try to do so again.
                 *
                 * If, however, we had emulated this instruction in the past,
                 * then this is just an error with the current run (for
                 * instance, exceptions due to a load/store). We return 0 so
                 * that this is now single-stepped, but continue to try
                 * emulating it in subsequent probe hits.
                 */
                if (unlikely(p->ainsn.boostable != 1))
                        p->ainsn.boostable = -1;
        }

        return ret;
}
NOKPROBE_SYMBOL(try_to_emulate);
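
/*
 * Summary (added commentary) of the ainsn.boostable states driven by
 * try_to_emulate():
 *	 0 - emulation not yet attempted for this probe
 *	 1 - emulate_step() has succeeded at least once; later hits may
 *	     skip the single-step ("boosted" execution)
 *	-1 - emulation failed before ever succeeding; always single-step
 */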

int kprobe_handler(struct pt_regs *regs)
{
        struct kprobe *p;
        int ret = 0;
        unsigned int *addr = (unsigned int *)regs->nip;
        struct kprobe_ctlblk *kcb;

        if (user_mode(regs))
                return 0;

        /*
         * We don't want to be preempted for the entire
         * duration of kprobe processing
         */
        preempt_disable();
        kcb = get_kprobe_ctlblk();

        /* Check we're not actually recursing */
        if (kprobe_running()) {
                p = get_kprobe(addr);
                if (p) {
                        kprobe_opcode_t insn = *p->ainsn.insn;
                        if (kcb->kprobe_status == KPROBE_HIT_SS &&
                                        is_trap(insn)) {
                                /* Turn off 'trace' bits */
                                regs->msr &= ~MSR_SINGLESTEP;
                                regs->msr |= kcb->kprobe_saved_msr;
                                goto no_kprobe;
                        }
                        /* We have reentered the kprobe_handler(), since
                         * another probe was hit while within the handler.
                         * We here save the original kprobes variables and
                         * just single step on the instruction of the new probe
                         * without calling any user handlers.
                         */
                        save_previous_kprobe(kcb);
                        set_current_kprobe(p, regs, kcb);
                        kprobes_inc_nmissed_count(p);
                        kcb->kprobe_status = KPROBE_REENTER;
                        if (p->ainsn.boostable >= 0) {
                                ret = try_to_emulate(p, regs);

                                if (ret > 0) {
                                        restore_previous_kprobe(kcb);
                                        preempt_enable_no_resched();
                                        return 1;
                                }
                        }
                        prepare_singlestep(p, regs);
                        return 1;
                } else if (*addr != BREAKPOINT_INSTRUCTION) {
                        /* If this is a trap variant, it does not belong to us */
                        kprobe_opcode_t cur_insn = *addr;

                        if (is_trap(cur_insn))
                                goto no_kprobe;
                        /* The breakpoint instruction was removed by
                         * another cpu right after we hit it; no further
                         * handling of this interrupt is appropriate
                         */
                        ret = 1;
                }
                goto no_kprobe;
        }

        p = get_kprobe(addr);
        if (!p) {
                if (*addr != BREAKPOINT_INSTRUCTION) {
                        /*
                         * PowerPC has multiple variants of the "trap"
                         * instruction. If the current instruction is a
                         * trap variant, it could belong to someone else
                         */
                        kprobe_opcode_t cur_insn = *addr;
                        if (is_trap(cur_insn))
                                goto no_kprobe;
                        /*
                         * The breakpoint instruction was removed right
                         * after we hit it. Another cpu has removed
                         * either a probepoint or a debugger breakpoint
                         * at this address. In either case, no further
                         * handling of this interrupt is appropriate.
                         */
                        ret = 1;
                }
                /* Not one of ours: let kernel handle it */
                goto no_kprobe;
        }

        kcb->kprobe_status = KPROBE_HIT_ACTIVE;
        set_current_kprobe(p, regs, kcb);
        if (p->pre_handler && p->pre_handler(p, regs)) {
                /* handler changed execution path, so skip ss setup */
                reset_current_kprobe();
                preempt_enable_no_resched();
                return 1;
        }

        if (p->ainsn.boostable >= 0) {
                ret = try_to_emulate(p, regs);

                if (ret > 0) {
                        if (p->post_handler)
                                p->post_handler(p, regs, 0);

                        kcb->kprobe_status = KPROBE_HIT_SSDONE;
                        reset_current_kprobe();
                        preempt_enable_no_resched();
                        return 1;
                }
        }
        prepare_singlestep(p, regs);
        kcb->kprobe_status = KPROBE_HIT_SS;
        return 1;

no_kprobe:
        preempt_enable_no_resched();
        return ret;
}
NOKPROBE_SYMBOL(kprobe_handler);
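
/*
 * Summary (added commentary): kprobe_handler() resolves a trap in one
 * of three ways -- the copied instruction is emulated in place (the
 * boosted path), it is single-stepped out of the instruction slot with
 * kprobe_post_handler() finishing up, or the trap is found not to be
 * ours and is handed back to the kernel via the no_kprobe path.
 */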

/*
 * Function return probe trampoline:
 *	- init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe
 *		causes the handlers to fire
 */
asm(".global kretprobe_trampoline\n"
        ".type kretprobe_trampoline, @function\n"
        "kretprobe_trampoline:\n"
        "nop\n"
        "blr\n"
        ".size kretprobe_trampoline, .-kretprobe_trampoline\n");

/*
 * Called when the probe at kretprobe trampoline is hit
 */
static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct kretprobe_instance *ri = NULL;
        struct hlist_head *head, empty_rp;
        struct hlist_node *tmp;
        unsigned long flags, orig_ret_address = 0;
        unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

        INIT_HLIST_HEAD(&empty_rp);
        kretprobe_hash_lock(current, &head, &flags);

        /*
         * It is possible to have multiple instances associated with a given
         * task either because multiple functions in the call path have
         * a return probe installed on them, and/or more than one return
         * probe was registered for a target function.
         *
         * We can handle this because:
         *     - instances are always inserted at the head of the list
         *     - when multiple return probes are registered for the same
         *       function, the first instance's ret_addr will point to the
         *       real return address, and all the rest will point to
         *       kretprobe_trampoline
         */
        hlist_for_each_entry_safe(ri, tmp, head, hlist) {
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;

                if (ri->rp && ri->rp->handler)
                        ri->rp->handler(ri, regs);

                orig_ret_address = (unsigned long)ri->ret_addr;
                recycle_rp_inst(ri, &empty_rp);

                if (orig_ret_address != trampoline_address)
                        /*
                         * This is the real return address. Any other
                         * instances associated with this task are for
                         * other calls deeper on the call stack
                         */
                        break;
        }

        kretprobe_assert(ri, orig_ret_address, trampoline_address);

        /*
         * We get here through one of two paths:
         * 1. by taking a trap -> kprobe_handler() -> here
         * 2. by optprobe branch -> optimized_callback() -> opt_pre_handler() -> here
         *
         * When going back through (1), we need regs->nip to be setup properly
         * as it is used to determine the return address from the trap.
         * For (2), since nip is not honoured with optprobes, we instead setup
         * the link register properly so that the subsequent 'blr' in
         * kretprobe_trampoline jumps back to the right instruction.
         *
         * For nip, we should set the address to the previous instruction since
         * we end up emulating it in kprobe_handler(), which increments the nip
         * again.
         */
        regs->nip = orig_ret_address - 4;
        regs->link = orig_ret_address;

        kretprobe_hash_unlock(current, &flags);

        hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
        }

        return 0;
}
NOKPROBE_SYMBOL(trampoline_probe_handler);
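
/*
 * Worked example (added commentary, illustrative address): if the
 * probed function would have returned to 0xc000000000123458, the
 * handler above sets regs->link to that address and regs->nip to
 * 0xc000000000123454, the "previous" instruction; kprobe_handler()
 * then emulates the trampoline instruction and bumps nip by 4, so
 * execution resumes exactly at the real return address.
 */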

/*
 * Called after single-stepping. p->addr is the address of the
 * instruction whose first byte has been replaced by the "breakpoint"
 * instruction. To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction. The address of this
 * copy is p->ainsn.insn.
 */
int kprobe_post_handler(struct pt_regs *regs)
{
        struct kprobe *cur = kprobe_running();
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        if (!cur || user_mode(regs))
                return 0;

        /* make sure we got here for instruction we have a kprobe on */
        if (((unsigned long)cur->ainsn.insn + 4) != regs->nip)
                return 0;

        if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
                kcb->kprobe_status = KPROBE_HIT_SSDONE;
                cur->post_handler(cur, regs, 0);
        }

        /* Adjust nip to after the single-stepped instruction */
        regs->nip = (unsigned long)cur->addr + 4;
        regs->msr |= kcb->kprobe_saved_msr;

        /* Restore the original saved kprobes variables and continue. */
        if (kcb->kprobe_status == KPROBE_REENTER) {
                restore_previous_kprobe(kcb);
                goto out;
        }
        reset_current_kprobe();
out:
        preempt_enable_no_resched();

        /*
         * if somebody else is singlestepping across a probe point, msr
         * will have DE/SE set, in which case, continue the remaining processing
         * of do_debug, as if this is not a probe hit.
         */
        if (regs->msr & MSR_SINGLESTEP)
                return 0;

        return 1;
}
NOKPROBE_SYMBOL(kprobe_post_handler);

int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
        struct kprobe *cur = kprobe_running();
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        const struct exception_table_entry *entry;

        switch (kcb->kprobe_status) {
        case KPROBE_HIT_SS:
        case KPROBE_REENTER:
                /*
                 * We are here because the instruction being single
                 * stepped caused a page fault. We reset the current
                 * kprobe and the nip points back to the probe address
                 * and allow the page fault handler to continue as a
                 * normal page fault.
                 */
                regs->nip = (unsigned long)cur->addr;
                regs->msr &= ~MSR_SINGLESTEP; /* Turn off 'trace' bits */
                regs->msr |= kcb->kprobe_saved_msr;
                if (kcb->kprobe_status == KPROBE_REENTER)
                        restore_previous_kprobe(kcb);
                else
                        reset_current_kprobe();
                preempt_enable_no_resched();
                break;
        case KPROBE_HIT_ACTIVE:
        case KPROBE_HIT_SSDONE:
                /*
                 * We increment the nmissed count for accounting;
                 * we could also use the npre/npostfault counts for
                 * accounting these specific fault cases.
                 */
                kprobes_inc_nmissed_count(cur);

                /*
                 * We come here because instructions in the pre/post
                 * handler caused the page fault; this could happen
                 * if the handler tries to access user space via
                 * copy_from_user(), get_user() etc. Let the
                 * user-specified handler try to fix it first.
                 */
                if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
                        return 1;

                /*
                 * In case the user-specified fault handler returned
                 * zero, try to fix up.
                 */
                if ((entry = search_exception_tables(regs->nip)) != NULL) {
                        regs->nip = extable_fixup(entry);
                        return 1;
                }

                /*
                 * fixup_exception() could not handle it;
                 * let do_page_fault() fix it.
                 */
                break;
        default:
                break;
        }
        return 0;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);

unsigned long arch_deref_entry_point(void *entry)
{
#ifdef PPC64_ELF_ABI_v1
        if (!kernel_text_address((unsigned long)entry))
                return ppc_global_function_entry(entry);
        else
#endif
                return (unsigned long)entry;
}
NOKPROBE_SYMBOL(arch_deref_entry_point);

static struct kprobe trampoline_p = {
        .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
        .pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
        return register_kprobe(&trampoline_p);
}

int arch_trampoline_kprobe(struct kprobe *p)
{
        if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
                return 1;

        return 0;
}
NOKPROBE_SYMBOL(arch_trampoline_kprobe);