/* arch/sparc64/kernel/kprobes.c
 *
 * Copyright (C) 2004 David S. Miller <davem@davemloft.net>
 */
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/slab.h>
#include <asm/signal.h>
#include <asm/cacheflush.h>
#include <asm/uaccess.h>
/* We do not have hardware single-stepping on sparc64.
 * So we implement software single-stepping with breakpoint
 * traps.  The top-level scheme is similar to that used
 * in the x86 kprobes implementation.
 *
 * In the kprobe->ainsn.insn[] array we store the original
 * instruction at index zero and a break instruction at
 * index one.
 *
 * When we hit a kprobe we:
 * - Run the pre-handler
 * - Remember "regs->tnpc" and interrupt level stored in
 *   "regs->tstate" so we can restore them later
 * - Disable PIL interrupts
 * - Set regs->tpc to point to kprobe->ainsn.insn[0]
 * - Set regs->tnpc to point to kprobe->ainsn.insn[1]
 * - Mark that we are actively in a kprobe
 *
 * At this point we wait for the second breakpoint at
 * kprobe->ainsn.insn[1] to hit.  When it does we:
 * - Run the post-handler
 * - Set regs->tpc to "remembered" regs->tnpc stored above,
 *   restore the PIL interrupt level in "regs->tstate" as well
 * - Make any adjustments necessary to regs->tnpc in order
 *   to handle relative branches correctly.  See below.
 * - Mark that we are no longer actively in a kprobe.
 */
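/* Note: as kprobe_trap() below suggests, the two breakpoint opcodes
 * (BREAKPOINT_INSTRUCTION and BREAKPOINT_INSTRUCTION_2) are the software
 * traps "ta 0x70" and "ta 0x71", which arrive as trap levels 0x170 and
 * 0x171 respectively.
 */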
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
        if ((unsigned long) p->addr & 0x3UL)
                return -EILSEQ;

        p->ainsn.insn[0] = *p->addr;
        flushi(&p->ainsn.insn[0]);

        p->ainsn.insn[1] = BREAKPOINT_INSTRUCTION_2;
        flushi(&p->ainsn.insn[1]);

        p->opcode = *p->addr;
        return 0;
}
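/* Arming a probe overwrites the probed instruction with the breakpoint
 * opcode and flushes the I-cache line; disarming writes the saved
 * p->opcode back.
 */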
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
        *p->addr = BREAKPOINT_INSTRUCTION;
        flushi(p->addr);
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
        *p->addr = p->opcode;
        flushi(p->addr);
}
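/* kprobe_handler() can reenter when a probe is hit from within another
 * probe's handlers.  prev_kprobe keeps the outer probe's state so the
 * inner, reentrant hit can be single-stepped and the outer state
 * restored afterwards.
 */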
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        kcb->prev_kprobe.kp = kprobe_running();
        kcb->prev_kprobe.status = kcb->kprobe_status;
        kcb->prev_kprobe.orig_tnpc = kcb->kprobe_orig_tnpc;
        kcb->prev_kprobe.orig_tstate_pil = kcb->kprobe_orig_tstate_pil;
}
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
        kcb->kprobe_status = kcb->prev_kprobe.status;
        kcb->kprobe_orig_tnpc = kcb->prev_kprobe.orig_tnpc;
        kcb->kprobe_orig_tstate_pil = kcb->prev_kprobe.orig_tstate_pil;
}
static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
                                struct kprobe_ctlblk *kcb)
{
        __get_cpu_var(current_kprobe) = p;
        kcb->kprobe_orig_tnpc = regs->tnpc;
        kcb->kprobe_orig_tstate_pil = (regs->tstate & TSTATE_PIL);
}
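/* Redirect execution into the instruction copy: PIL is raised so device
 * interrupts cannot come in mid-step, then tpc/tnpc are pointed at
 * ainsn.insn[0] (the original instruction) and ainsn.insn[1] (the second
 * breakpoint that ends the step).
 */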
static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs,
                        struct kprobe_ctlblk *kcb)
{
        regs->tstate |= TSTATE_PIL;

        /* Single step inline if it is a breakpoint instruction. */
        if (p->opcode == BREAKPOINT_INSTRUCTION) {
                regs->tpc = (unsigned long) p->addr;
                regs->tnpc = kcb->kprobe_orig_tnpc;
        } else {
                regs->tpc = (unsigned long) &p->ainsn.insn[0];
                regs->tnpc = (unsigned long) &p->ainsn.insn[1];
        }
}
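/* Returning non-zero from the handlers below tells
 * kprobe_exceptions_notify() to report NOTIFY_STOP, i.e. the trap has
 * been consumed by kprobes.
 */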
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
        struct kprobe *p;
        void *addr = (void *) regs->tpc;
        int ret = 0;
        struct kprobe_ctlblk *kcb;

        /*
         * We don't want to be preempted for the entire
         * duration of kprobe processing
         */
        preempt_disable();
        kcb = get_kprobe_ctlblk();

        if (kprobe_running()) {
                p = get_kprobe(addr);
                if (p) {
                        if (kcb->kprobe_status == KPROBE_HIT_SS) {
                                regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
                                        kcb->kprobe_orig_tstate_pil);
                                goto no_kprobe;
                        }
                        /* We have reentered the kprobe_handler(), since
                         * another probe was hit while within the handler.
                         * We here save the original kprobes variables and
                         * just single step on the instruction of the new probe
                         * without calling any user handlers.
                         */
                        save_previous_kprobe(kcb);
                        set_current_kprobe(p, regs, kcb);
                        kprobes_inc_nmissed_count(p);
                        kcb->kprobe_status = KPROBE_REENTER;
                        prepare_singlestep(p, regs, kcb);
                        return 1;
                } else {
                        if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
                                /* The breakpoint instruction was removed by
                                 * another cpu right after we hit, no further
                                 * handling of this interrupt is appropriate
                                 */
                                ret = 1;
                                goto no_kprobe;
                        }
                        p = __get_cpu_var(current_kprobe);
                        if (p->break_handler && p->break_handler(p, regs))
                                goto ss_probe;
                }
                goto no_kprobe;
        }

        p = get_kprobe(addr);
        if (!p) {
                if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
                        /*
                         * The breakpoint instruction was removed right
                         * after we hit it.  Another cpu has removed
                         * either a probepoint or a debugger breakpoint
                         * at this address.  In either case, no further
                         * handling of this interrupt is appropriate.
                         */
                        ret = 1;
                }
                /* Not one of ours: let kernel handle it */
                goto no_kprobe;
        }

        set_current_kprobe(p, regs, kcb);
        kcb->kprobe_status = KPROBE_HIT_ACTIVE;
        if (p->pre_handler && p->pre_handler(p, regs))
                return 1;

ss_probe:
        prepare_singlestep(p, regs, kcb);
        kcb->kprobe_status = KPROBE_HIT_SS;
        return 1;

no_kprobe:
        preempt_enable_no_resched();
        return ret;
}
/* If INSN is a relative control transfer instruction,
 * return the corrected branch destination value.
 *
 * regs->tpc and regs->tnpc still hold the values of the
 * program counters at the time of trap due to the execution
 * of the BREAKPOINT_INSTRUCTION_2 at p->ainsn.insn[1]
 */
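/* The mask tests below pick out the PC-relative control transfers:
 * op = 1 (0x40000000) is CALL; op = 0, op2 = 0b001 (0x00400000) is a
 * branch-with-prediction (BPcc); op = 0, op2 = 0b010 (0x00800000) is a
 * traditional Bicc branch.  (op is bits 31:30, op2 is bits 24:22.)
 */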
static unsigned long __kprobes relbranch_fixup(u32 insn, struct kprobe *p,
                                               struct pt_regs *regs)
{
        unsigned long real_pc = (unsigned long) p->addr;

        /* Branch not taken, no mods necessary.  */
        if (regs->tnpc == regs->tpc + 0x4UL)
                return real_pc + 0x8UL;

        /* The three cases are call, branch w/prediction,
         * and traditional branch.
         */
        if ((insn & 0xc0000000) == 0x40000000 ||
            (insn & 0xc1c00000) == 0x00400000 ||
            (insn & 0xc1c00000) == 0x00800000) {
                unsigned long ainsn_addr;

                ainsn_addr = (unsigned long) &p->ainsn.insn[0];

                /* The instruction did all the work for us
                 * already, just apply the offset to the correct
                 * instruction location.
                 */
                return (real_pc + (regs->tnpc - ainsn_addr));
        }

        /* It is jmpl or some other absolute PC modification instruction,
         * leave NPC as-is.
         */
        return regs->tnpc;
}
/* If INSN is an instruction which writes its PC location
 * into a destination register, fix that up.
 */
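/* A 'call' always writes %o7.  For 'jmpl', rd in the opcode names the
 * destination register: globals and outs (rd <= 15) live in pt_regs,
 * while locals and ins live in the register window, which is reached on
 * the stack at %fp + STACK_BIAS once the windows have been flushed.
 */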
static void __kprobes retpc_fixup(struct pt_regs *regs, u32 insn,
                                  unsigned long real_pc)
{
        unsigned long *slot = NULL;

        /* Simplest case is 'call', which always uses %o7 */
        if ((insn & 0xc0000000) == 0x40000000) {
                slot = &regs->u_regs[UREG_I7];
        }

        /* 'jmpl' encodes the register inside of the opcode */
        if ((insn & 0xc1f80000) == 0x81c00000) {
                unsigned long rd = ((insn >> 25) & 0x1f);

                if (rd <= 15) {
                        slot = &regs->u_regs[rd];
                } else {
                        /* Hard case, it goes onto the stack. */
                        flushw_all();

                        rd -= 16;
                        slot = (unsigned long *)
                                (regs->u_regs[UREG_FP] + STACK_BIAS);
                        slot += rd;
                }
        }

        if (slot != NULL)
                *slot = real_pc;
}
/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction which has been replaced by the breakpoint
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is &p->ainsn.insn[0].
 *
 * This function prepares to return from the post-single-step
 * breakpoint trap.
 */
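/* relbranch_fixup() reads tpc/tnpc as they were left by the trap at
 * ainsn.insn[1], so it must run before tpc is rewritten with the saved
 * original tnpc below.
 */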
static void __kprobes resume_execution(struct kprobe *p,
                struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
        u32 insn = p->ainsn.insn[0];

        regs->tnpc = relbranch_fixup(insn, p, regs);

        /* This assignment must occur after relbranch_fixup() */
        regs->tpc = kcb->kprobe_orig_tnpc;

        retpc_fixup(regs, insn, (unsigned long) p->addr);

        regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
                        kcb->kprobe_orig_tstate_pil);
}
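/* post_kprobe_handler() runs on the trap taken at ainsn.insn[1], i.e.
 * after the stepped instruction copy has executed.
 */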
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
        struct kprobe *cur = kprobe_running();
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        if (!cur)
                return 0;

        if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
                kcb->kprobe_status = KPROBE_HIT_SSDONE;
                cur->post_handler(cur, regs, 0);
        }

        resume_execution(cur, regs, kcb);

        /* Restore the original saved kprobes variables and continue. */
        if (kcb->kprobe_status == KPROBE_REENTER) {
                restore_previous_kprobe(kcb);
                goto out;
        }
        reset_current_kprobe();
out:
        preempt_enable_no_resched();

        return 1;
}
int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
        struct kprobe *cur = kprobe_running();
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        const struct exception_table_entry *entry;

        switch(kcb->kprobe_status) {
        case KPROBE_HIT_SS:
        case KPROBE_REENTER:
                /*
                 * We are here because the instruction being single
                 * stepped caused a page fault. We reset the current
                 * kprobe and the tpc points back to the probe address
                 * and allow the page fault handler to continue as a
                 * normal page fault.
                 */
                regs->tpc = (unsigned long)cur->addr;
                regs->tnpc = kcb->kprobe_orig_tnpc;
                regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
                                kcb->kprobe_orig_tstate_pil);
                if (kcb->kprobe_status == KPROBE_REENTER)
                        restore_previous_kprobe(kcb);
                else
                        reset_current_kprobe();
                preempt_enable_no_resched();
                break;
        case KPROBE_HIT_ACTIVE:
        case KPROBE_HIT_SSDONE:
                /*
                 * We increment the nmissed count for accounting,
                 * we can also use npre/npostfault count for accounting
                 * these specific fault cases.
                 */
                kprobes_inc_nmissed_count(cur);

                /*
                 * We come here because instructions in the pre/post
                 * handler caused the page_fault, this could happen
                 * if handler tries to access user space by
                 * copy_from_user(), get_user() etc. Let the
                 * user-specified handler try to fix it first.
                 */
                if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
                        return 1;

                /*
                 * In case the user-specified fault handler returned
                 * zero, try to fix up.
                 */

                entry = search_exception_tables(regs->tpc);
                if (entry) {
                        regs->tpc = entry->fixup;
                        regs->tnpc = regs->tpc + 4;
                        return 1;
                }

                /*
                 * fixup_exception() could not handle it,
                 * Let do_page_fault() fix it.
                 */
                break;
        default:
                break;
        }

        return 0;
}
/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
                                       unsigned long val, void *data)
{
        struct die_args *args = (struct die_args *)data;
        int ret = NOTIFY_DONE;

        if (args->regs && user_mode(args->regs))
                return ret;

        switch (val) {
        case DIE_DEBUG:
                if (kprobe_handler(args->regs))
                        ret = NOTIFY_STOP;
                break;
        case DIE_DEBUG_2:
                if (post_kprobe_handler(args->regs))
                        ret = NOTIFY_STOP;
                break;
        default:
                break;
        }
        return ret;
}
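/* Entry point for the two software traps used by kprobes: trap level
 * 0x170 ("ta 0x70") and 0x171 ("ta 0x71").  Kernel-mode hits are fed to
 * the notifier chain above; user-mode hits are treated as bad traps.
 */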
asmlinkage void __kprobes kprobe_trap(unsigned long trap_level,
                                      struct pt_regs *regs)
{
        BUG_ON(trap_level != 0x170 && trap_level != 0x171);

        if (user_mode(regs)) {
                local_irq_enable();
                bad_trap(regs, trap_level);
                return;
        }

        /* trap_level == 0x170 --> ta 0x70
         * trap_level == 0x171 --> ta 0x71
         */
        if (notify_die((trap_level == 0x170) ? DIE_DEBUG : DIE_DEBUG_2,
                       (trap_level == 0x170) ? "debug" : "debug_2",
                       regs, 0, trap_level, SIGTRAP) != NOTIFY_STOP)
                bad_trap(regs, trap_level);
}
/* Jprobes support.  */
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct jprobe *jp = container_of(p, struct jprobe, kp);
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        memcpy(&(kcb->jprobe_saved_regs), regs, sizeof(*regs));

        regs->tpc = (unsigned long) jp->entry;
        regs->tnpc = ((unsigned long) jp->entry) + 0x4UL;
        regs->tstate |= TSTATE_PIL;

        return 1;
}
void __kprobes jprobe_return(void)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        register unsigned long orig_fp asm("g1");

        orig_fp = kcb->jprobe_saved_regs.u_regs[UREG_FP];
        __asm__ __volatile__("\n"
"1:     cmp             %%sp, %0\n\t"
        "blu,a,pt       %%xcc, 1b\n\t"
        " restore\n\t"
        ".globl         jprobe_return_trap_instruction\n"
"jprobe_return_trap_instruction:\n\t"
        "ta             0x70"
        : /* no outputs */
        : "r" (orig_fp));
}

extern void jprobe_return_trap_instruction(void);
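/* The loop above pops register windows until %sp climbs back to the
 * frame recorded in jprobe_saved_regs, then the trap instruction raises
 * the kprobe breakpoint so longjmp_break_handler() below can restore the
 * saved register state.
 */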
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
        u32 *addr = (u32 *) regs->tpc;
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        if (addr == (u32 *) jprobe_return_trap_instruction) {
                memcpy(regs, &(kcb->jprobe_saved_regs), sizeof(*regs));
                preempt_enable_no_resched();
                return 1;
        }
        return 0;
}
/* The value stored in the return address register is actually 2
 * instructions before where the callee will return to.
 * Sequences usually look something like this
 *
 *              call    some_function   <--- return register points here
 *               nop                    <--- call delay slot
 *              whatever                <--- where callee returns to
 *
 * To keep trampoline_probe_handler logic simpler, we normalize the
 * value kept in ri->ret_addr so we don't need to keep adjusting it
 * back and forth.
 */
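/* Concretely: ri->ret_addr = %o7 + 8 is the real return target, and the
 * trampoline address is stored biased by -8 so that the callee's normal
 * "ret; restore" (which jumps to %i7 + 8) lands exactly on
 * kretprobe_trampoline.
 */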
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
                                      struct pt_regs *regs)
{
        ri->ret_addr = (kprobe_opcode_t *)(regs->u_regs[UREG_RETPC] + 8);

        /* Replace the return addr with trampoline addr */
        regs->u_regs[UREG_RETPC] =
                ((unsigned long)kretprobe_trampoline) - 8;
}
/*
 * Called when the probe at kretprobe trampoline is hit
 */
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct kretprobe_instance *ri = NULL;
        struct hlist_head *head, empty_rp;
        struct hlist_node *tmp;
        unsigned long flags, orig_ret_address = 0;
        unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

        INIT_HLIST_HEAD(&empty_rp);
        kretprobe_hash_lock(current, &head, &flags);

        /*
         * It is possible to have multiple instances associated with a given
         * task either because multiple functions in the call path
         * have a return probe installed on them, and/or more than one
         * return probe was registered for a target function.
         *
         * We can handle this because:
         *     - instances are always inserted at the head of the list
         *     - when multiple return probes are registered for the same
         *       function, the first instance's ret_addr will point to the
         *       real return address, and all the rest will point to
         *       kretprobe_trampoline
         */
        hlist_for_each_entry_safe(ri, tmp, head, hlist) {
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;

                if (ri->rp && ri->rp->handler)
                        ri->rp->handler(ri, regs);

                orig_ret_address = (unsigned long)ri->ret_addr;
                recycle_rp_inst(ri, &empty_rp);

                if (orig_ret_address != trampoline_address)
                        /*
                         * This is the real return address. Any other
                         * instances associated with this task are for
                         * other calls deeper on the call stack
                         */
                        break;
        }

        kretprobe_assert(ri, orig_ret_address, trampoline_address);
        regs->tpc = orig_ret_address;
        regs->tnpc = orig_ret_address + 4;

        reset_current_kprobe();
        kretprobe_hash_unlock(current, &flags);
        preempt_enable_no_resched();

        hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
        }
        /*
         * By returning a non-zero value, we are telling
         * kprobe_handler() that we don't want the post_handler
         * to run (and have re-enabled preemption)
         */
        return 1;
}
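/* kretprobe_trampoline itself never runs to completion: the kprobe
 * registered on its address in arch_init_kprobes() fires first, and
 * trampoline_probe_handler() rewrites tpc/tnpc to the real return
 * address.
 */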
void kretprobe_trampoline_holder(void)
{
        asm volatile(".global kretprobe_trampoline\n"
                     "kretprobe_trampoline:\n"
                     "\tnop\n"
                     "\tnop\n");
}
static struct kprobe trampoline_p = {
        .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
        .pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
        return register_kprobe(&trampoline_p);
}
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
        if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
                return 1;

        return 0;
}